Diffstat (limited to 'drivers/s390')
 drivers/s390/block/dasd.c            |  255
 drivers/s390/block/dasd_3990_erp.c   |   47
 drivers/s390/block/dasd_alias.c      |   77
 drivers/s390/block/dasd_diag.c       |   20
 drivers/s390/block/dasd_eckd.c       |  183
 drivers/s390/block/dasd_eckd.h       |    4
 drivers/s390/block/dasd_eer.c        |    5
 drivers/s390/block/dasd_fba.c        |   11
 drivers/s390/block/dasd_int.h        |   13
 drivers/s390/block/dasd_ioctl.c      |    4
 drivers/s390/block/dasd_proc.c       |    7
 drivers/s390/block/dcssblk.c         |    2
 drivers/s390/block/xpram.c           |    2
 drivers/s390/char/con3215.c          |    1
 drivers/s390/char/con3270.c          |    1
 drivers/s390/char/fs3270.c           |   12
 drivers/s390/char/monreader.c        |   11
 drivers/s390/char/monwriter.c        |    9
 drivers/s390/char/raw3270.c          |    2
 drivers/s390/char/sclp.c             |    2
 drivers/s390/char/sclp_async.c       |   56
 drivers/s390/char/sclp_cmd.c         |    3
 drivers/s390/char/sclp_quiesce.c     |   48
 drivers/s390/char/sclp_vt220.c       |   30
 drivers/s390/char/tape.h             |    9
 drivers/s390/char/tape_34xx.c        |    8
 drivers/s390/char/tape_3590.c        |    2
 drivers/s390/char/tape_block.c       |   20
 drivers/s390/char/tape_char.c        |   54
 drivers/s390/char/tape_core.c        |   65
 drivers/s390/char/tape_proc.c        |    2
 drivers/s390/char/tty3270.c          |   20
 drivers/s390/char/vmlogrdr.c         |   10
 drivers/s390/char/vmur.c             |    3
 drivers/s390/char/vmwatchdog.c       |   29
 drivers/s390/char/zcore.c            |    1
 drivers/s390/cio/Makefile            |    2
 drivers/s390/cio/blacklist.c         |   13
 drivers/s390/cio/ccwgroup.c          |    2
 drivers/s390/cio/ccwreq.c            |  328
 drivers/s390/cio/chp.c               |    4
 drivers/s390/cio/cio.c               |    1
 drivers/s390/cio/cio.h               |    8
 drivers/s390/cio/cmf.c               |    2
 drivers/s390/cio/css.c               |   59
 drivers/s390/cio/css.h               |    3
 drivers/s390/cio/device.c            | 1019
 drivers/s390/cio/device.h            |   26
 drivers/s390/cio/device_fsm.c        |  442
 drivers/s390/cio/device_id.c         |  375
 drivers/s390/cio/device_ops.c        |  142
 drivers/s390/cio/device_pgid.c       |  963
 drivers/s390/cio/device_status.c     |    3
 drivers/s390/cio/io_sch.h            |   73
 drivers/s390/cio/qdio_debug.c        |    2
 drivers/s390/cio/qdio_perf.c         |    2
 drivers/s390/crypto/ap_bus.c         |   31
 drivers/s390/crypto/ap_bus.h         |   18
 drivers/s390/crypto/zcrypt_api.c     |   11
 drivers/s390/crypto/zcrypt_api.h     |    2
 drivers/s390/crypto/zcrypt_cex2a.c   |   75
 drivers/s390/crypto/zcrypt_pcica.c   |    2
 drivers/s390/crypto/zcrypt_pcicc.c   |    2
 drivers/s390/crypto/zcrypt_pcixcc.c  |   51
 drivers/s390/net/Makefile            |    6
 drivers/s390/net/claw.c              |   82
 drivers/s390/net/claw.h              |   12
 drivers/s390/net/ctcm_fsms.c         |    1
 drivers/s390/net/ctcm_fsms.h         |    1
 drivers/s390/net/ctcm_main.c         |  168
 drivers/s390/net/ctcm_main.h         |   20
 drivers/s390/net/ctcm_mpc.c          |    1
 drivers/s390/net/ctcm_sysfs.c        |   11
 drivers/s390/net/cu3088.c            |  148
 drivers/s390/net/cu3088.h            |   41
 drivers/s390/net/fsm.c               |    1
 drivers/s390/net/fsm.h               |    2
 drivers/s390/net/lcs.c               |  115
 drivers/s390/net/lcs.h               |   18
 drivers/s390/net/netiucv.c           |   14
 drivers/s390/net/qeth_core.h         |    8
 drivers/s390/net/qeth_core_main.c    |  225
 drivers/s390/net/qeth_core_mpc.h     |   47
 drivers/s390/net/qeth_core_sys.c     |   83
 drivers/s390/net/qeth_l2_main.c      |   33
 drivers/s390/net/qeth_l3.h           |    2
 drivers/s390/net/qeth_l3_main.c      |  144
 drivers/s390/net/qeth_l3_sys.c       |   67
 drivers/s390/net/smsgiucv.c          |    9
 drivers/s390/scsi/zfcp_aux.c         |  406
 drivers/s390/scsi/zfcp_ccw.c         |  195
 drivers/s390/scsi/zfcp_cfdc.c        |   15
 drivers/s390/scsi/zfcp_dbf.c         |  134
 drivers/s390/scsi/zfcp_dbf.h         |   10
 drivers/s390/scsi/zfcp_def.h         |  323
 drivers/s390/scsi/zfcp_erp.c         |  160
 drivers/s390/scsi/zfcp_ext.h         |   41
 drivers/s390/scsi/zfcp_fc.c          |  684
 drivers/s390/scsi/zfcp_fc.h          |  260
 drivers/s390/scsi/zfcp_fsf.c         |  382
 drivers/s390/scsi/zfcp_fsf.h         |   53
 drivers/s390/scsi/zfcp_scsi.c        |  156
 drivers/s390/scsi/zfcp_sysfs.c       |  251
 103 files changed, 4785 insertions(+), 4233 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index dad0449475b6..fdb2e7c14506 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -24,7 +24,6 @@
 #include <asm/ccwdev.h>
 #include <asm/ebcdic.h>
 #include <asm/idals.h>
-#include <asm/todclk.h>
 #include <asm/itcw.h>
 
 /* This is ugly... */
@@ -64,6 +63,7 @@ static void do_restore_device(struct work_struct *);
 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 static void dasd_device_timeout(unsigned long);
 static void dasd_block_timeout(unsigned long);
+static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -960,7 +960,7 @@ static void dasd_device_timeout(unsigned long ptr)
 	device = (struct dasd_device *) ptr;
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	/* re-activate request queue */
-	device->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	dasd_schedule_device_bh(device);
 }
@@ -994,10 +994,9 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
 		return;
 	cqr = (struct dasd_ccw_req *) intparm;
 	if (cqr->status != DASD_CQR_IN_IO) {
-		DBF_EVENT(DBF_DEBUG,
-			  "invalid status in handle_killed_request: "
-			  "bus_id %s, status %02x",
-			  dev_name(&cdev->dev), cqr->status);
+		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
+				"invalid status in handle_killed_request: "
+				"%02x", cqr->status);
 		return;
 	}
 
@@ -1023,7 +1022,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
 	/* First of all start sense subsystem status request. */
 	dasd_eer_snss(device);
 
-	device->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 	dasd_schedule_device_bh(device);
 	if (device->block)
 		dasd_schedule_block_bh(device->block);
@@ -1045,12 +1044,13 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		case -EIO:
 			break;
 		case -ETIMEDOUT:
-			DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
-				  __func__, dev_name(&cdev->dev));
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"request timed out\n", __func__);
 			break;
 		default:
-			DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
-				  __func__, dev_name(&cdev->dev), PTR_ERR(irb));
+			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+					"unknown error %ld\n", __func__,
+					PTR_ERR(irb));
 		}
 		dasd_handle_killed_request(cdev, intparm);
 		return;
@@ -1405,6 +1405,20 @@ void dasd_schedule_device_bh(struct dasd_device *device)
 	tasklet_hi_schedule(&device->tasklet);
 }
 
+void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped |= bits;
+}
+EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
+
+void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
+{
+	device->stopped &= ~bits;
+	if (!device->stopped)
+		wake_up(&generic_waitq);
+}
+EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
+
 /*
  * Queue a request to the head of the device ccw_queue.
  * Start the I/O if possible.
@@ -1465,58 +1479,135 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 }
 
 /*
- * Queue a request to the tail of the device ccw_queue and wait for
- * it's completion.
+ * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
  */
-int dasd_sleep_on(struct dasd_ccw_req *cqr)
+static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
-	int rc;
+	dasd_erp_fn_t erp_fn;
 
+	if (cqr->status == DASD_CQR_FILLED)
+		return 0;
 	device = cqr->startdev;
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			device->discipline->handle_terminated_request(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			erp_fn = device->discipline->erp_action(cqr);
+			erp_fn(cqr);
+			return 1;
+		}
+		if (cqr->status == DASD_CQR_FAILED)
+			dasd_log_sense(cqr, &cqr->irb);
+		if (cqr->refers) {
+			__dasd_process_erp(device, cqr);
+			return 1;
+		}
+	}
+	return 0;
+}
 
-	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
-	dasd_add_request_tail(cqr);
-	wait_event(generic_waitq, _wait_for_wakeup(cqr));
+static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
+{
+	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
+		if (cqr->refers) /* erp is not done yet */
+			return 1;
+		return ((cqr->status != DASD_CQR_DONE) &&
+			(cqr->status != DASD_CQR_FAILED));
+	} else
+		return (cqr->status == DASD_CQR_FILLED);
+}
 
-	if (cqr->status == DASD_CQR_DONE)
+static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
+{
+	struct dasd_device *device;
+	int rc;
+	struct list_head ccw_queue;
+	struct dasd_ccw_req *cqr;
+
+	INIT_LIST_HEAD(&ccw_queue);
+	maincqr->status = DASD_CQR_FILLED;
+	device = maincqr->startdev;
+	list_add(&maincqr->blocklist, &ccw_queue);
+	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
+	     cqr = list_first_entry(&ccw_queue,
+				    struct dasd_ccw_req, blocklist)) {
+
+		if (__dasd_sleep_on_erp(cqr))
+			continue;
+		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
+			continue;
+
+		/* Non-temporary stop condition will trigger fail fast */
+		if (device->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(device))) {
+			cqr->status = DASD_CQR_FAILED;
+			continue;
+		}
+
+		/* Don't try to start requests if device is stopped */
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, !(device->stopped));
+			if (rc == -ERESTARTSYS) {
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, !(device->stopped));
+
+		cqr->callback = dasd_wakeup_cb;
+		cqr->callback_data = (void *) &generic_waitq;
+		dasd_add_request_tail(cqr);
+		if (interruptible) {
+			rc = wait_event_interruptible(
+				generic_waitq, _wait_for_wakeup(cqr));
+			if (rc == -ERESTARTSYS) {
+				dasd_cancel_req(cqr);
+				/* wait (non-interruptible) for final status */
+				wait_event(generic_waitq,
+					   _wait_for_wakeup(cqr));
+				cqr->status = DASD_CQR_FAILED;
+				maincqr->intrc = rc;
+				continue;
+			}
+		} else
+			wait_event(generic_waitq, _wait_for_wakeup(cqr));
+	}
+
+	maincqr->endclk = get_clock();
+	if ((maincqr->status != DASD_CQR_DONE) &&
+	    (maincqr->intrc != -ERESTARTSYS))
+		dasd_log_sense(maincqr, &maincqr->irb);
+	if (maincqr->status == DASD_CQR_DONE)
 		rc = 0;
-	else if (cqr->intrc)
-		rc = cqr->intrc;
+	else if (maincqr->intrc)
+		rc = maincqr->intrc;
 	else
 		rc = -EIO;
 	return rc;
 }
 
 /*
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * it's completion.
+ */
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
+{
+	return _dasd_sleep_on(cqr, 0);
+}
+
+/*
  * Queue a request to the tail of the device ccw_queue and wait
  * interruptible for it's completion.
  */
 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
-	struct dasd_device *device;
-	int rc;
-
-	device = cqr->startdev;
-	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &generic_waitq;
-	dasd_add_request_tail(cqr);
-	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
-	if (rc == -ERESTARTSYS) {
-		dasd_cancel_req(cqr);
-		/* wait (non-interruptible) for final status */
-		wait_event(generic_waitq, _wait_for_wakeup(cqr));
-		cqr->intrc = rc;
-	}
-
-	if (cqr->status == DASD_CQR_DONE)
-		rc = 0;
-	else if (cqr->intrc)
-		rc = cqr->intrc;
-	else
-		rc = -EIO;
-	return rc;
+	return _dasd_sleep_on(cqr, 1);
 }
 
 /*
@@ -1630,7 +1721,7 @@ static void dasd_block_timeout(unsigned long ptr)
 	block = (struct dasd_block *) ptr;
 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
 	/* re-activate request queue */
-	block->base->stopped &= ~DASD_STOPPED_PENDING;
+	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
 	dasd_schedule_block_bh(block);
 }
@@ -1657,11 +1748,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
 /*
  * Process finished error recovery ccw.
  */
-static inline void __dasd_block_process_erp(struct dasd_block *block,
-					    struct dasd_ccw_req *cqr)
+static void __dasd_process_erp(struct dasd_device *device,
+			       struct dasd_ccw_req *cqr)
 {
 	dasd_erp_fn_t erp_fn;
-	struct dasd_device *device = block->base;
 
 	if (cqr->status == DASD_CQR_DONE)
 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
@@ -1725,9 +1815,12 @@ static void __dasd_process_request_queue(struct dasd_block *block)
 			 */
 			if (!list_empty(&block->ccw_queue))
 				break;
-			spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
-			basedev->stopped |= DASD_STOPPED_PENDING;
-			spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+			spin_lock_irqsave(
+				get_ccwdev_lock(basedev->cdev), flags);
+			dasd_device_set_stop_bits(basedev,
+						  DASD_STOPPED_PENDING);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(basedev->cdev), flags);
 			dasd_block_set_timer(block, HZ/2);
 			break;
 		}
@@ -1813,7 +1906,7 @@ restart:
 			cqr->status = DASD_CQR_FILLED;
 			cqr->retries = 255;
 			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
-			base->stopped |= DASD_STOPPED_QUIESCE;
+			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
 			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
 					       flags);
 			goto restart;
@@ -1821,7 +1914,7 @@ restart:
 
 		/* Process finished ERP request. */
 		if (cqr->refers) {
-			__dasd_block_process_erp(block, cqr);
+			__dasd_process_erp(base, cqr);
 			goto restart;
 		}
 
@@ -1952,7 +2045,7 @@ restart_cb:
 		/* Process finished ERP request. */
 		if (cqr->refers) {
 			spin_lock_bh(&block->queue_lock);
-			__dasd_block_process_erp(block, cqr);
+			__dasd_process_erp(block->base, cqr);
 			spin_unlock_bh(&block->queue_lock);
 			/* restart list_for_xx loop since dasd_process_erp
 			 * might remove multiple elements */
@@ -2208,18 +2301,11 @@ int dasd_generic_probe(struct ccw_device *cdev,
 {
 	int ret;
 
-	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
-	if (ret) {
-		DBF_EVENT(DBF_WARNING,
-		       "dasd_generic_probe: could not set ccw-device options "
-		       "for %s\n", dev_name(&cdev->dev));
-		return ret;
-	}
 	ret = dasd_add_sysfs_files(cdev);
 	if (ret) {
-		DBF_EVENT(DBF_WARNING,
-		       "dasd_generic_probe: could not add sysfs entries "
-		       "for %s\n", dev_name(&cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_generic_probe: could not add "
+				"sysfs entries");
 		return ret;
 	}
 	cdev->handler = &dasd_int_handler;
@@ -2418,16 +2504,16 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
 			cqr->status = DASD_CQR_QUEUED;
 			cqr->retries++;
 		}
-		device->stopped |= DASD_STOPPED_DC_WAIT;
+		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
 		dasd_device_clear_timer(device);
 		dasd_schedule_device_bh(device);
 		ret = 1;
 		break;
 	case CIO_OPER:
 		/* FIXME: add a sanity check. */
-		device->stopped &= ~DASD_STOPPED_DC_WAIT;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
 		if (device->stopped & DASD_UNRESUMED_PM) {
-			device->stopped &= ~DASD_UNRESUMED_PM;
+			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
 			dasd_restore_device(device);
 			ret = 1;
 			break;
@@ -2452,7 +2538,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 	/* disallow new I/O */
-	device->stopped |= DASD_STOPPED_PM;
+	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
 	/* clear active requests */
 	INIT_LIST_HEAD(&freeze_queue);
 	spin_lock_irq(get_ccwdev_lock(cdev));
@@ -2504,22 +2590,27 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 		return PTR_ERR(device);
 
 	/* allow new IO again */
-	device->stopped &= ~DASD_STOPPED_PM;
-	device->stopped &= ~DASD_UNRESUMED_PM;
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
 
 	dasd_schedule_device_bh(device);
-	if (device->block)
-		dasd_schedule_block_bh(device->block);
 
-	if (device->discipline->restore)
+	/*
+	 * call discipline restore function
+	 * if device is stopped do nothing e.g. for disconnected devices
+	 */
+	if (device->discipline->restore && !(device->stopped))
 		rc = device->discipline->restore(device);
-	if (rc)
+	if (rc || device->stopped)
 		/*
 		 * if the resume failed for the DASD we put it in
 		 * an UNRESUMED stop state
 		 */
 		device->stopped |= DASD_UNRESUMED_PM;
 
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
+
 	dasd_put_device(device);
 	return 0;
 }
@@ -2532,6 +2623,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 {
 	struct dasd_ccw_req *cqr;
 	struct ccw1 *ccw;
+	unsigned long *idaw;
 
 	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
 
@@ -2545,14 +2637,21 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = CCW_CMD_RDC;
-	ccw->cda = (__u32)(addr_t)rdc_buffer;
-	ccw->count = rdc_buffer_size;
+	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
+		idaw = (unsigned long *) (cqr->data);
+		ccw->cda = (__u32)(addr_t) idaw;
+		ccw->flags = CCW_FLAG_IDA;
+		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
+	} else {
+		ccw->cda = (__u32)(addr_t) rdc_buffer;
+		ccw->flags = 0;
+	}
 
+	ccw->count = rdc_buffer_size;
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->expires = 10*HZ;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	cqr->retries = 2;
+	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index e8ff7b0c961d..44796ba4eb9b 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -12,7 +12,6 @@
 #include <linux/timer.h>
 #include <linux/slab.h>
 #include <asm/idals.h>
-#include <asm/todclk.h>
 
 #define PRINTK_HEADER "dasd_erp(3990): "
 
@@ -70,8 +69,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
  * processing until the started timer has expired or an related
  * interrupt was received.
  */
-static void
-dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
+static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
 {
 
 	struct dasd_device *device = erp->startdev;
@@ -81,10 +79,13 @@ dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
81 "blocking request queue for %is", expires/HZ); 79 "blocking request queue for %is", expires/HZ);
82 80
83 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 81 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
84 device->stopped |= DASD_STOPPED_PENDING; 82 dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
85 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 83 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
86 erp->status = DASD_CQR_FILLED; 84 erp->status = DASD_CQR_FILLED;
87 dasd_block_set_timer(device->block, expires); 85 if (erp->block)
86 dasd_block_set_timer(erp->block, expires);
87 else
88 dasd_device_set_timer(device, expires);
88} 89}
89 90
90/* 91/*
@@ -243,9 +244,13 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
  * DESCRIPTION
  *   Setup ERP to do the ERP action 1 (see Reference manual).
  *   Repeat the operation on a different channel path.
- *   If all alternate paths have been tried, the request is posted with a
- *   permanent error.
- *   Note: duplex handling is not implemented (yet).
+ *   As deviation from the recommended recovery action, we reset the path mask
+ *   after we have tried each path and go through all paths a second time.
+ *   This will cover situations where only one path at a time is actually down,
+ *   but all paths fail and recover just with the same sequence and timing as
+ *   we try to use them (flapping links).
+ *   If all alternate paths have been tried twice, the request is posted with
+ *   a permanent error.
  *
  * PARAMETER
  *   erp		pointer to the current ERP
@@ -254,17 +259,25 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
  *   erp		pointer to the ERP
  *
  */
-static struct dasd_ccw_req *
-dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
+static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
 {
+	erp->function = dasd_3990_erp_action_1_sec;
+	dasd_3990_erp_alternate_path(erp);
+	return erp;
+}
 
+static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
+{
 	erp->function = dasd_3990_erp_action_1;
-
 	dasd_3990_erp_alternate_path(erp);
-
+	if (erp->status == DASD_CQR_FAILED) {
+		erp->status = DASD_CQR_FILLED;
+		erp->retries = 10;
+		erp->lpm = LPM_ANYPATH;
+		erp->function = dasd_3990_erp_action_1_sec;
+	}
 	return erp;
-
-} /* end dasd_3990_erp_action_1 */
+} /* end dasd_3990_erp_action_1(b) */
 
 /*
  * DASD_3990_ERP_ACTION_4
@@ -2295,6 +2308,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 		return cqr;
 	}
 
+	ccw = cqr->cpaddr;
 	if (cqr->cpmode == 1) {
 		/* make a shallow copy of the original tcw but set new tsb */
 		erp->cpmode = 1;
@@ -2303,6 +2317,9 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 		tsb = (struct tsb *) &tcw[1];
 		*tcw = *((struct tcw *)cqr->cpaddr);
 		tcw->tsb = (long)tsb;
+	} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
+		/* PSF cannot be chained from NOOP/TIC */
+		erp->cpaddr = cqr->cpaddr;
 	} else {
 		/* initialize request with default TIC to current ERP/CQR */
 		ccw = erp->cpaddr;
@@ -2487,6 +2504,8 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
 
 		erp = dasd_3990_erp_action_1(erp);
 
+	} else if (erp->function == dasd_3990_erp_action_1_sec) {
+		erp = dasd_3990_erp_action_1_sec(erp);
 	} else if (erp->function == dasd_3990_erp_action_5) {
 
 		/* retries have not been successful */
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 70a008c00522..fd1231738ef4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -152,6 +152,7 @@ static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
 	spin_lock_init(&lcu->lock);
+	init_completion(&lcu->lcu_setup);
 	return lcu;
 
 out_err4:
@@ -240,6 +241,67 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 }
 
 /*
+ * The first device to be registered on an LCU will have to do
+ * some additional setup steps to configure that LCU on the
+ * storage server. All further devices should wait with their
+ * initialization until the first device is done.
+ * To synchronize this work, the first device will call
+ * dasd_alias_lcu_setup_complete when it is done, and all
+ * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
+ */
+void dasd_alias_lcu_setup_complete(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid *uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+	uid = &private->uid;
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(uid);
+	if (server)
+		lcu = _find_lcu(server, uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid->ssid, uid->real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	complete_all(&lcu->lcu_setup);
+}
+
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
+{
+	struct dasd_eckd_private *private;
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid *uid;
+
+	private = (struct dasd_eckd_private *) device->private;
+	uid = &private->uid;
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(uid);
+	if (server)
+		lcu = _find_lcu(server, uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid->ssid, uid->real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	wait_for_completion(&lcu->lcu_setup);
+}
+
+/*
  * This function removes a device from the scope of alias management.
  * The complicated part is to make sure that it is not in use by
  * any of the workers. If necessary cancel the work.
@@ -755,11 +817,11 @@ static void __stop_device_on_lcu(struct dasd_device *device,
 {
 	/* If pos == device then device is already locked! */
 	if (pos == device) {
-		pos->stopped |= DASD_STOPPED_SU;
+		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
 		return;
 	}
 	spin_lock(get_ccwdev_lock(pos->cdev));
-	pos->stopped |= DASD_STOPPED_SU;
+	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
 	spin_unlock(get_ccwdev_lock(pos->cdev));
 }
 
@@ -793,26 +855,26 @@ static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
 
 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		device->stopped &= ~DASD_STOPPED_SU;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 
 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-		device->stopped &= ~DASD_STOPPED_SU;
+		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
 
 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-			device->stopped &= ~DASD_STOPPED_SU;
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
 					       flags);
 		}
 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-			device->stopped &= ~DASD_STOPPED_SU;
+			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
 					       flags);
 		}
@@ -836,7 +898,8 @@ static void summary_unit_check_handling_work(struct work_struct *work)
 
 	/* 2. reset summary unit check */
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
+	dasd_device_remove_stop_bits(device,
+				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	reset_summary_unit_check(lcu, device, suc_data->reason);
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 4e49b4a6c880..f64d0db881b4 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -24,7 +24,6 @@
 #include <asm/ebcdic.h>
 #include <asm/io.h>
 #include <asm/s390_ext.h>
-#include <asm/todclk.h>
 #include <asm/vtoc.h>
 #include <asm/diag.h>
 
@@ -145,6 +144,15 @@ dasd_diag_erp(struct dasd_device *device)
 
 	mdsk_term_io(device);
 	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
+	if (rc == 4) {
+		if (!(device->features & DASD_FEATURE_READONLY)) {
+			dev_warn(&device->cdev->dev,
+				 "The access mode of a DIAG device changed"
+				 " to read-only");
+			device->features |= DASD_FEATURE_READONLY;
+		}
+		rc = 0;
+	}
 	if (rc)
 		dev_warn(&device->cdev->dev, "DIAG ERP failed with "
 			    "rc=%d\n", rc);
@@ -433,16 +441,20 @@ dasd_diag_check_device(struct dasd_device *device)
 	for (sb = 512; sb < bsize; sb = sb << 1)
 		block->s2b_shift++;
 	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
-	if (rc) {
+	if (rc && (rc != 4)) {
 		dev_warn(&device->cdev->dev, "DIAG initialization "
 			"failed with rc=%d\n", rc);
 		rc = -EIO;
 	} else {
+		if (rc == 4)
+			device->features |= DASD_FEATURE_READONLY;
 		dev_info(&device->cdev->dev,
-			 "New DASD with %ld byte/block, total size %ld KB\n",
+			 "New DASD with %ld byte/block, total size %ld KB%s\n",
 			 (unsigned long) block->bp_block,
 			 (unsigned long) (block->blocks <<
-			 block->s2b_shift) >> 1);
+			 block->s2b_shift) >> 1,
+			 (rc == 4) ? ", read-only device" : "");
+		rc = 0;
 	}
 out_label:
 	free_page((long) label);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index ab3521755588..5819dc02a143 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -24,7 +24,6 @@
 #include <asm/idals.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
-#include <asm/todclk.h>
 #include <asm/uaccess.h>
 #include <asm/cio.h>
 #include <asm/ccwdev.h>
@@ -78,6 +77,11 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
 
 static struct ccw_driver dasd_eckd_driver; /* see below */
 
+#define INIT_CQR_OK 0
+#define INIT_CQR_UNFORMATTED 1
+#define INIT_CQR_ERROR 2
+
+
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
 static int
@@ -86,11 +90,12 @@ dasd_eckd_probe (struct ccw_device *cdev)
 	int ret;
 
 	/* set ECKD specific ccw-device options */
-	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
+				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
 	if (ret) {
-		DBF_EVENT(DBF_WARNING,
-		       "dasd_eckd_probe: could not set ccw-device options "
-		       "for %s\n", dev_name(&cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
+				"dasd_eckd_probe: could not set "
+				"ccw-device options");
 		return ret;
 	}
 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
@@ -749,8 +754,7 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
 	cqr->block = NULL;
 	cqr->expires = 10*HZ;
 	cqr->lpm = lpm;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	cqr->retries = 2;
+	cqr->retries = 256;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
@@ -885,16 +889,15 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
 						     &conf_len, lpm);
 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
-				DBF_EVENT(DBF_WARNING,
-					  "Read configuration data returned "
-					  "error %d for device: %s", rc,
-					  dev_name(&device->cdev->dev));
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+						"Read configuration data returned "
+						"error %d", rc);
 				return rc;
 			}
 			if (conf_data == NULL) {
-				DBF_EVENT(DBF_WARNING, "No configuration "
-					  "data retrieved for device: %s",
-					  dev_name(&device->cdev->dev));
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+						"No configuration data "
+						"retrieved");
 				continue;	/* no error */
 			}
 			/* save first valid configuration data */
@@ -941,16 +944,14 @@ static int dasd_eckd_read_features(struct dasd_device *device)
 				   sizeof(struct dasd_rssd_features)),
 				   device);
 	if (IS_ERR(cqr)) {
-		DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
-			  "request for device: %s",
-			  dev_name(&device->cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
+				"allocate initialization request");
 		return PTR_ERR(cqr);
 	}
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->block = NULL;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-	cqr->retries = 5;
+	cqr->retries = 256;
 	cqr->expires = 10 * HZ;
 
 	/* Prepare for Read Subsystem Data */
@@ -1012,9 +1013,9 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 	}
 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
 	psf_ssc_data->order = PSF_ORDER_SSC;
-	psf_ssc_data->suborder = 0x40;
+	psf_ssc_data->suborder = 0xc0;
 	if (enable_pav) {
-		psf_ssc_data->suborder |= 0x88;
+		psf_ssc_data->suborder |= 0x08;
 		psf_ssc_data->reserved[0] = 0x88;
 	}
 	ccw = cqr->cpaddr;
@@ -1025,6 +1026,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->block = NULL;
+	cqr->retries = 256;
 	cqr->expires = 10*HZ;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -1057,7 +1059,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
 /*
  * Valide storage server of current device.
  */
-static int dasd_eckd_validate_server(struct dasd_device *device)
+static void dasd_eckd_validate_server(struct dasd_device *device)
 {
 	int rc;
 	struct dasd_eckd_private *private;
@@ -1068,15 +1070,12 @@ static int dasd_eckd_validate_server(struct dasd_device *device)
 	else
 		enable_pav = 1;
 	rc = dasd_eckd_psf_ssc(device, enable_pav);
+
 	/* may be requested feature is not available on server,
 	 * therefore just report error and go ahead */
 	private = (struct dasd_eckd_private *) device->private;
-	DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
-		  "returned rc=%d for device: %s",
-		  private->uid.vendor, private->uid.serial,
-		  private->uid.ssid, rc, dev_name(&device->cdev->dev));
-	/* RE-Read Configuration Data */
-	return dasd_eckd_read_conf(device);
+	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
+			"returned rc=%d", private->uid.ssid, rc);
 }
 
 /*
@@ -1090,6 +1089,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	struct dasd_block *block;
 	int is_known, rc;
 
+	if (!ccw_device_is_pathgroup(device->cdev)) {
+		dev_warn(&device->cdev->dev,
+			 "A channel path group could not be established\n");
+		return -EIO;
+	}
+	if (!ccw_device_is_multipath(device->cdev)) {
+		dev_info(&device->cdev->dev,
+			 "The DASD is not operating in multipath mode\n");
+	}
 	private = (struct dasd_eckd_private *) device->private;
 	if (!private) {
 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
@@ -1123,9 +1131,9 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	if (private->uid.type == UA_BASE_DEVICE) {
 		block = dasd_alloc_block();
 		if (IS_ERR(block)) {
-			DBF_EVENT(DBF_WARNING, "could not allocate dasd "
-				  "block structure for device: %s",
-				  dev_name(&device->cdev->dev));
+			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+					"could not allocate dasd "
+					"block structure");
 			rc = PTR_ERR(block);
 			goto out_err1;
 		}
@@ -1139,12 +1147,21 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 		rc = is_known;
 		goto out_err2;
 	}
+	/*
+	 * dasd_eckd_vaildate_server is done on the first device that
+	 * is found for an LCU. All later other devices have to wait
+	 * for it, so they will read the correct feature codes.
+	 */
 	if (!is_known) {
-		/* new lcu found */
-		rc = dasd_eckd_validate_server(device); /* will switch pav on */
-		if (rc)
-			goto out_err3;
-	}
+		dasd_eckd_validate_server(device);
+		dasd_alias_lcu_setup_complete(device);
+	} else
+		dasd_alias_wait_for_lcu_setup(device);
+
+	/* device may report different configuration data after LCU setup */
+	rc = dasd_eckd_read_conf(device);
+	if (rc)
+		goto out_err3;
 
 	/* Read Feature Codes */
 	dasd_eckd_read_features(device);
@@ -1153,9 +1170,8 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 					 &private->rdc_data, 64);
 	if (rc) {
-		DBF_EVENT(DBF_WARNING,
-			  "Read device characteristics failed, rc=%d for "
-			  "device: %s", rc, dev_name(&device->cdev->dev));
+		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+				"Read device characteristic failed, rc=%d", rc);
 		goto out_err3;
 	}
 	/* find the vaild cylinder size */
@@ -1256,12 +1272,29 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 	cqr->block = NULL;
 	cqr->startdev = device;
 	cqr->memdev = device;
-	cqr->retries = 0;
+	cqr->retries = 255;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
 	return cqr;
 }
 
+/* differentiate between 'no record found' and any other error */
+static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
+{
+	char *sense;
+	if (init_cqr->status == DASD_CQR_DONE)
+		return INIT_CQR_OK;
+	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
+		 init_cqr->status == DASD_CQR_FAILED) {
+		sense = dasd_get_sense(&init_cqr->irb);
+		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
+			return INIT_CQR_UNFORMATTED;
+		else
+			return INIT_CQR_ERROR;
+	} else
+		return INIT_CQR_ERROR;
+}
+
 /*
  * This is the callback function for the init_analysis cqr. It saves
  * the status of the initial analysis ccw before it frees it and kicks
@@ -1269,21 +1302,20 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
  * dasd_eckd_do_analysis again (if the devices has not been marked
  * for deletion in the meantime).
  */
-static void
-dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
+static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
+					void *data)
 {
 	struct dasd_eckd_private *private;
 	struct dasd_device *device;
 
 	device = init_cqr->startdev;
 	private = (struct dasd_eckd_private *) device->private;
-	private->init_cqr_status = init_cqr->status;
+	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
 	dasd_sfree_request(init_cqr, device);
 	dasd_kick_device(device);
 }
 
-static int
-dasd_eckd_start_analysis(struct dasd_block *block)
+static int dasd_eckd_start_analysis(struct dasd_block *block)
 {
 	struct dasd_eckd_private *private;
 	struct dasd_ccw_req *init_cqr;
@@ -1295,27 +1327,44 @@ dasd_eckd_start_analysis(struct dasd_block *block)
 	init_cqr->callback = dasd_eckd_analysis_callback;
 	init_cqr->callback_data = NULL;
 	init_cqr->expires = 5*HZ;
+	/* first try without ERP, so we can later handle unformatted
+	 * devices as special case
+	 */
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
+	init_cqr->retries = 0;
 	dasd_add_request_head(init_cqr);
 	return -EAGAIN;
 }
 
-static int
-dasd_eckd_end_analysis(struct dasd_block *block)
+static int dasd_eckd_end_analysis(struct dasd_block *block)
 {
 	struct dasd_device *device;
 	struct dasd_eckd_private *private;
 	struct eckd_count *count_area;
 	unsigned int sb, blk_per_trk;
 	int status, i;
+	struct dasd_ccw_req *init_cqr;
 
 	device = block->base;
 	private = (struct dasd_eckd_private *) device->private;
 	status = private->init_cqr_status;
 	private->init_cqr_status = -1;
-	if (status != DASD_CQR_DONE) {
-		dev_warn(&device->cdev->dev,
-			 "The DASD is not formatted\n");
+	if (status == INIT_CQR_ERROR) {
+		/* try again, this time with full ERP */
+		init_cqr = dasd_eckd_analysis_ccw(device);
+		dasd_sleep_on(init_cqr);
+		status = dasd_eckd_analysis_evaluation(init_cqr);
+		dasd_sfree_request(init_cqr, device);
+	}
+
+	if (status == INIT_CQR_UNFORMATTED) {
+		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
 		return -EMEDIUMTYPE;
+	} else if (status == INIT_CQR_ERROR) {
+		dev_err(&device->cdev->dev,
+			"Detecting the DASD disk layout failed because "
+			"of an I/O error\n");
+		return -EIO;
 	}
 
 	private->uses_cdl = 1;
@@ -1607,8 +1656,7 @@ dasd_eckd_format_device(struct dasd_device * device,
 	}
 	fcp->startdev = device;
 	fcp->memdev = device;
-	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
-	fcp->retries = 5;	/* set retry counter to enable default ERP */
+	fcp->retries = 256;
 	fcp->buildclk = get_clock();
 	fcp->status = DASD_CQR_FILLED;
 	return fcp;
@@ -2338,6 +2386,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
 	/* Calculate number of blocks/records per track. */
 	blksize = block->bp_block;
 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+	if (blk_per_trk == 0)
+		return ERR_PTR(-EINVAL);
 	/* Calculate record id of first and last block. */
 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
 	first_offs = sector_div(first_trk, blk_per_trk);
@@ -2688,6 +2738,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->retries = 0;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	cqr->expires = 10 * HZ;
 
 	/* Prepare for Read Subsystem Data */
@@ -3211,8 +3262,10 @@ int dasd_eckd_pm_freeze(struct dasd_device *device)
 int dasd_eckd_restore_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
+	struct dasd_eckd_characteristics temp_rdc_data;
 	int is_known, rc;
 	struct dasd_uid temp_uid;
+	unsigned long flags;
 
 	private = (struct dasd_eckd_private *) device->private;
 
@@ -3225,7 +3278,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
 	rc = dasd_eckd_generate_uid(device, &private->uid);
 	dasd_get_uid(device->cdev, &temp_uid);
 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
-		dev_err(&device->cdev->dev, "The UID of the DASD has changed\n");
+		dev_err(&device->cdev->dev, "The UID of the DASD has "
+			"changed\n");
 	if (rc)
 		goto out_err;
 	dasd_set_uid(device->cdev, &private->uid);
@@ -3235,25 +3289,30 @@ int dasd_eckd_restore_device(struct dasd_device *device)
3235 if (is_known < 0) 3289 if (is_known < 0)
3236 return is_known; 3290 return is_known;
3237 if (!is_known) { 3291 if (!is_known) {
3238 /* new lcu found */ 3292 dasd_eckd_validate_server(device);
3239 rc = dasd_eckd_validate_server(device); /* will switch pav on */ 3293 dasd_alias_lcu_setup_complete(device);
3240 if (rc) 3294 } else
3241 goto out_err; 3295 dasd_alias_wait_for_lcu_setup(device);
3242 } 3296
3297 /* RE-Read Configuration Data */
3298 rc = dasd_eckd_read_conf(device);
3299 if (rc)
3300 goto out_err;
3243 3301
3244 /* Read Feature Codes */ 3302 /* Read Feature Codes */
3245 dasd_eckd_read_features(device); 3303 dasd_eckd_read_features(device);
3246 3304
3247 /* Read Device Characteristics */ 3305 /* Read Device Characteristics */
3248 memset(&private->rdc_data, 0, sizeof(private->rdc_data));
3249 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 3306 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3250 &private->rdc_data, 64); 3307 &temp_rdc_data, 64);
3251 if (rc) { 3308 if (rc) {
3252 DBF_EVENT(DBF_WARNING, 3309 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3253 "Read device characteristics failed, rc=%d for " 3310 "Read device characteristic failed, rc=%d", rc);
3254 "device: %s", rc, dev_name(&device->cdev->dev));
3255 goto out_err; 3311 goto out_err;
3256 } 3312 }
3313 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3314 memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3315 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3257 3316
3258 /* add device to alias management */ 3317 /* add device to alias management */
3259 dasd_alias_add_device(device); 3318 dasd_alias_add_device(device);
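
The restore path above is also reworked so that readers never see half-written data: the device characteristics are read into an on-stack copy first and only committed to the live structure under the ccw device lock. Reduced to the bare pattern (context and error labels omitted):

        struct dasd_eckd_characteristics temp_rdc_data;
        unsigned long flags;
        int rc;

        /* Read into a private copy; a failure leaves rdc_data untouched. */
        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
                                         &temp_rdc_data, 64);
        if (rc == 0) {
                /* Publish the new data only while holding the device lock. */
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                memcpy(&private->rdc_data, &temp_rdc_data,
                       sizeof(temp_rdc_data));
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }
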
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ad45bcac3ce4..864d53c04201 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -414,6 +414,7 @@ struct alias_lcu {
414 struct summary_unit_check_work_data suc_data; 414 struct summary_unit_check_work_data suc_data;
415 struct read_uac_work_data ruac_data; 415 struct read_uac_work_data ruac_data;
416 struct dasd_ccw_req *rsu_cqr; 416 struct dasd_ccw_req *rsu_cqr;
417 struct completion lcu_setup;
417}; 418};
418 419
419struct alias_pav_group { 420struct alias_pav_group {
@@ -460,5 +461,6 @@ int dasd_alias_remove_device(struct dasd_device *);
460struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *); 461struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
461void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *); 462void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
462void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *); 463void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
463 464void dasd_alias_lcu_setup_complete(struct dasd_device *);
465void dasd_alias_wait_for_lcu_setup(struct dasd_device *);
464#endif /* DASD_ECKD_H */ 466#endif /* DASD_ECKD_H */
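
The new lcu_setup completion is what lets dasd_eckd_restore_device() above block until the first device on a logical control unit has finished server validation. The two helpers are implemented in dasd_alias.c, which this excerpt does not show; a plausible minimal shape, assuming they are thin wrappers around <linux/completion.h> and that a lookup from device to its alias_lcu exists (both assumptions, not quoted code):

        void dasd_alias_lcu_setup_complete(struct dasd_device *device)
        {
                struct alias_lcu *lcu = device_to_lcu(device);  /* hypothetical lookup */

                complete_all(&lcu->lcu_setup);  /* wake all waiters, stay completed */
        }

        void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
        {
                struct alias_lcu *lcu = device_to_lcu(device);  /* hypothetical lookup */

                wait_for_completion(&lcu->lcu_setup);  /* sleep until setup is done */
        }
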
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d96039eae59b..1f3e967aaba8 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -536,7 +536,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
536 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); 536 eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
537 if (!eerb) 537 if (!eerb)
538 return -ENOMEM; 538 return -ENOMEM;
539 lock_kernel();
540 eerb->buffer_page_count = eer_pages; 539 eerb->buffer_page_count = eer_pages;
541 if (eerb->buffer_page_count < 1 || 540 if (eerb->buffer_page_count < 1 ||
542 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { 541 eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
@@ -544,7 +543,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
544 DBF_EVENT(DBF_WARNING, "can't open device since module " 543 DBF_EVENT(DBF_WARNING, "can't open device since module "
545 "parameter eer_pages is smaller than 1 or" 544 "parameter eer_pages is smaller than 1 or"
546 " bigger than %d", (int)(INT_MAX / PAGE_SIZE)); 545 " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
547 unlock_kernel();
548 return -EINVAL; 546 return -EINVAL;
549 } 547 }
550 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; 548 eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
@@ -552,14 +550,12 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
552 GFP_KERNEL); 550 GFP_KERNEL);
553 if (!eerb->buffer) { 551 if (!eerb->buffer) {
554 kfree(eerb); 552 kfree(eerb);
555 unlock_kernel();
556 return -ENOMEM; 553 return -ENOMEM;
557 } 554 }
558 if (dasd_eer_allocate_buffer_pages(eerb->buffer, 555 if (dasd_eer_allocate_buffer_pages(eerb->buffer,
559 eerb->buffer_page_count)) { 556 eerb->buffer_page_count)) {
560 kfree(eerb->buffer); 557 kfree(eerb->buffer);
561 kfree(eerb); 558 kfree(eerb);
562 unlock_kernel();
563 return -ENOMEM; 559 return -ENOMEM;
564 } 560 }
565 filp->private_data = eerb; 561 filp->private_data = eerb;
@@ -567,7 +563,6 @@ static int dasd_eer_open(struct inode *inp, struct file *filp)
567 list_add(&eerb->list, &bufferlist); 563 list_add(&eerb->list, &bufferlist);
568 spin_unlock_irqrestore(&bufferlock, flags); 564 spin_unlock_irqrestore(&bufferlock, flags);
569 565
570 unlock_kernel();
571 return nonseekable_open(inp,filp); 566 return nonseekable_open(inp,filp);
572} 567}
573 568
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index f245377e8e27..0f152444ac77 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -20,7 +20,6 @@
20#include <asm/idals.h> 20#include <asm/idals.h>
21#include <asm/ebcdic.h> 21#include <asm/ebcdic.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/todclk.h>
24#include <asm/ccwdev.h> 23#include <asm/ccwdev.h>
25 24
26#include "dasd_int.h" 25#include "dasd_int.h"
@@ -141,9 +140,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
141 } 140 }
142 block = dasd_alloc_block(); 141 block = dasd_alloc_block();
143 if (IS_ERR(block)) { 142 if (IS_ERR(block)) {
144 DBF_EVENT(DBF_WARNING, "could not allocate dasd block " 143 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
145 "structure for device: %s", 144 "dasd block structure");
146 dev_name(&device->cdev->dev));
147 device->private = NULL; 145 device->private = NULL;
148 kfree(private); 146 kfree(private);
149 return PTR_ERR(block); 147 return PTR_ERR(block);
@@ -155,9 +153,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
155 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC, 153 rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
156 &private->rdc_data, 32); 154 &private->rdc_data, 32);
157 if (rc) { 155 if (rc) {
158 DBF_EVENT(DBF_WARNING, "Read device characteristics returned " 156 DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
159 "error %d for device: %s", 157 "characteristics returned error %d", rc);
160 rc, dev_name(&device->cdev->dev));
161 device->block = NULL; 158 device->block = NULL;
162 dasd_free_block(block); 159 dasd_free_block(block);
163 device->private = NULL; 160 device->private = NULL;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8afd9fa00875..e4c2143dabf6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -108,6 +108,16 @@ do { \
108 d_data); \ 108 d_data); \
109} while(0) 109} while(0)
110 110
111#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
112do { \
113 struct ccw_dev_id __dev_id; \
114 ccw_device_get_id(d_cdev, &__dev_id); \
115 debug_sprintf_event(dasd_debug_area, \
116 d_level, \
117 "0.%x.%04x " d_str "\n", \
118 __dev_id.ssid, __dev_id.devno, d_data); \
119} while (0)
120
111#define DBF_EXC(d_level, d_str, d_data...)\ 121#define DBF_EXC(d_level, d_str, d_data...)\
112do { \ 122do { \
113 debug_sprintf_exception(dasd_debug_area, \ 123 debug_sprintf_exception(dasd_debug_area, \
@@ -595,6 +605,9 @@ int dasd_generic_restore_device(struct ccw_device *);
595int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int); 605int dasd_generic_read_dev_chars(struct dasd_device *, int, void *, int);
596char *dasd_get_sense(struct irb *); 606char *dasd_get_sense(struct irb *);
597 607
608void dasd_device_set_stop_bits(struct dasd_device *, int);
609void dasd_device_remove_stop_bits(struct dasd_device *, int);
610
598/* externals in dasd_devmap.c */ 611/* externals in dasd_devmap.c */
599extern int dasd_max_devindex; 612extern int dasd_max_devindex;
600extern int dasd_probeonly; 613extern int dasd_probeonly;
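
The DBF_EVENT_DEVID macro added above resolves the ccw_dev_id itself and prefixes every trace entry with the 0.ssid.devno bus ID, which is why the converted call sites in dasd_eckd.c and dasd_fba.c can drop their dev_name() arguments. A call now looks like this (taken from the dasd_fba.c hunk above):

        DBF_EVENT_DEVID(DBF_WARNING, cdev,
                        "Read device characteristics returned error %d", rc);
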
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f756a1b0c57a..478bcdb90b6f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -101,7 +101,7 @@ static int dasd_ioctl_quiesce(struct dasd_block *block)
101 pr_info("%s: The DASD has been put in the quiesce " 101 pr_info("%s: The DASD has been put in the quiesce "
102 "state\n", dev_name(&base->cdev->dev)); 102 "state\n", dev_name(&base->cdev->dev));
103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 103 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
104 base->stopped |= DASD_STOPPED_QUIESCE; 104 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 105 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
106 return 0; 106 return 0;
107} 107}
@@ -122,7 +122,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
122 pr_info("%s: I/O operations have been resumed " 122 pr_info("%s: I/O operations have been resumed "
123 "on the DASD\n", dev_name(&base->cdev->dev)); 123 "on the DASD\n", dev_name(&base->cdev->dev));
124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 124 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
125 base->stopped &= ~DASD_STOPPED_QUIESCE; 125 dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 126 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
127 127
128 dasd_schedule_block_bh(block); 128 dasd_schedule_block_bh(block);
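
Quiesce and resume no longer touch base->stopped directly; they go through the helpers declared in dasd_int.h above. The definitions live in dasd.c, outside this excerpt, so the following is only a sketch of what thin wrappers would look like, assuming the caller still holds the ccw device lock as both ioctls here do:

        /* Sketch only; the real bodies are in dasd.c. */
        void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
        {
                device->stopped |= bits;
        }

        void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
        {
                device->stopped &= ~bits;
        }
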
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 654daa3cdfda..6315fbd8e68b 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -14,6 +14,7 @@
14#define KMSG_COMPONENT "dasd" 14#define KMSG_COMPONENT "dasd"
15 15
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/string.h>
17#include <linux/seq_file.h> 18#include <linux/seq_file.h>
18#include <linux/vmalloc.h> 19#include <linux/vmalloc.h>
19#include <linux/proc_fs.h> 20#include <linux/proc_fs.h>
@@ -215,7 +216,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
215 } 216 }
216 217
217 prof = &dasd_global_profile; 218 prof = &dasd_global_profile;
218 /* prevent couter 'overflow' on output */ 219 /* prevent counter 'overflow' on output */
219 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; 220 for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
220 factor *= 10); 221 factor *= 10);
221 222
@@ -272,10 +273,10 @@ dasd_statistics_write(struct file *file, const char __user *user_buf,
272 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer); 273 DBF_EVENT(DBF_DEBUG, "/proc/dasd/statictics: '%s'\n", buffer);
273 274
274 /* check for valid verbs */ 275 /* check for valid verbs */
275 for (str = buffer; isspace(*str); str++); 276 str = skip_spaces(buffer);
276 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) { 277 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
277 /* 'set xxx' was given */ 278 /* 'set xxx' was given */
278 for (str = str + 4; isspace(*str); str++); 279 str = skip_spaces(str + 4);
279 if (strcmp(str, "on") == 0) { 280 if (strcmp(str, "on") == 0) {
280 /* switch on statistics profiling */ 281 /* switch on statistics profiling */
281 dasd_profile_level = DASD_PROFILE_ON; 282 dasd_profile_level = DASD_PROFILE_ON;
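
The open-coded isspace() scans with their easy-to-miss empty loop bodies give way to skip_spaces(), which returns a pointer to the first non-whitespace character; that is what the new <linux/string.h> include is for:

        /* Before: for (str = buffer; isspace(*str); str++); */
        str = skip_spaces(buffer);

        /* Before: for (str = str + 4; isspace(*str); str++); */
        str = skip_spaces(str + 4);
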
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index f76f4bd82b9f..9b43ae94beba 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -1005,7 +1005,7 @@ static int dcssblk_thaw(struct device *dev)
1005 return 0; 1005 return 0;
1006} 1006}
1007 1007
1008static struct dev_pm_ops dcssblk_pm_ops = { 1008static const struct dev_pm_ops dcssblk_pm_ops = {
1009 .freeze = dcssblk_freeze, 1009 .freeze = dcssblk_freeze,
1010 .thaw = dcssblk_thaw, 1010 .thaw = dcssblk_thaw,
1011 .restore = dcssblk_restore, 1011 .restore = dcssblk_restore,
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 116d1b3eeb15..118de392af63 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -407,7 +407,7 @@ static int xpram_restore(struct device *dev)
407 return 0; 407 return 0;
408} 408}
409 409
410static struct dev_pm_ops xpram_pm_ops = { 410static const struct dev_pm_ops xpram_pm_ops = {
411 .restore = xpram_restore, 411 .restore = xpram_restore,
412}; 412};
413 413
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 21639d6c996f..9d61683b5633 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -857,7 +857,6 @@ static struct console con3215 = {
857 857
858/* 858/*
859 * 3215 console initialization code called from console_init(). 859 * 3215 console initialization code called from console_init().
860 * NOTE: This is called before kmalloc is available.
861 */ 860 */
862static int __init con3215_init(void) 861static int __init con3215_init(void)
863{ 862{
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index bb838bdf829d..6bca81aea396 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -572,7 +572,6 @@ static struct console con3270 = {
572 572
573/* 573/*
574 * 3270 console initialization code called from console_init(). 574 * 3270 console initialization code called from console_init().
575 * NOTE: This is called before kmalloc is available.
576 */ 575 */
577static int __init 576static int __init
578con3270_init(void) 577con3270_init(void)
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 097d3846a828..28e4649fa9e4 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -38,6 +38,8 @@ struct fs3270 {
38 size_t rdbuf_size; /* size of data returned by RDBUF */ 38 size_t rdbuf_size; /* size of data returned by RDBUF */
39}; 39};
40 40
41static DEFINE_MUTEX(fs3270_mutex);
42
41static void 43static void
42fs3270_wake_up(struct raw3270_request *rq, void *data) 44fs3270_wake_up(struct raw3270_request *rq, void *data)
43{ 45{
@@ -74,7 +76,7 @@ fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
74 } 76 }
75 rc = raw3270_start(view, rq); 77 rc = raw3270_start(view, rq);
76 if (rc == 0) { 78 if (rc == 0) {
77 /* Started sucessfully. Now wait for completion. */ 79 /* Started successfully. Now wait for completion. */
78 wait_event(fp->wait, raw3270_request_final(rq)); 80 wait_event(fp->wait, raw3270_request_final(rq));
79 } 81 }
80 } while (rc == -EACCES); 82 } while (rc == -EACCES);
@@ -328,7 +330,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
328 if (!fp) 330 if (!fp)
329 return -ENODEV; 331 return -ENODEV;
330 rc = 0; 332 rc = 0;
331 lock_kernel(); 333 mutex_lock(&fs3270_mutex);
332 switch (cmd) { 334 switch (cmd) {
333 case TUBICMD: 335 case TUBICMD:
334 fp->read_command = arg; 336 fp->read_command = arg;
@@ -354,7 +356,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
354 rc = -EFAULT; 356 rc = -EFAULT;
355 break; 357 break;
356 } 358 }
357 unlock_kernel(); 359 mutex_unlock(&fs3270_mutex);
358 return rc; 360 return rc;
359} 361}
360 362
@@ -437,7 +439,7 @@ fs3270_open(struct inode *inode, struct file *filp)
437 minor = tty->index + RAW3270_FIRSTMINOR; 439 minor = tty->index + RAW3270_FIRSTMINOR;
438 tty_kref_put(tty); 440 tty_kref_put(tty);
439 } 441 }
440 lock_kernel(); 442 mutex_lock(&fs3270_mutex);
441 /* Check if some other program is already using fullscreen mode. */ 443 /* Check if some other program is already using fullscreen mode. */
442 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor); 444 fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
443 if (!IS_ERR(fp)) { 445 if (!IS_ERR(fp)) {
@@ -478,7 +480,7 @@ fs3270_open(struct inode *inode, struct file *filp)
478 } 480 }
479 filp->private_data = fp; 481 filp->private_data = fp;
480out: 482out:
481 unlock_kernel(); 483 mutex_unlock(&fs3270_mutex);
482 return rc; 484 return rc;
483} 485}
484 486
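
fs3270 shows the BKL-removal pattern that repeats through the rest of this series: a file-local mutex replaces lock_kernel()/unlock_kernel() on paths that only need to serialize against each other. Condensed to a sketch (the real ioctl body handles TUBICMD and friends):

        static DEFINE_MUTEX(fs3270_mutex);      /* serializes open() and ioctl() */

        static long fs3270_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
        {
                long rc = 0;

                mutex_lock(&fs3270_mutex);
                /* ... command handling elided ... */
                mutex_unlock(&fs3270_mutex);
                return rc;
        }
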
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 89ece1c235aa..33e96484d54f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -12,7 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/smp_lock.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -283,7 +282,6 @@ static int mon_open(struct inode *inode, struct file *filp)
283 /* 282 /*
284 * only one user allowed 283 * only one user allowed
285 */ 284 */
286 lock_kernel();
287 rc = -EBUSY; 285 rc = -EBUSY;
288 if (test_and_set_bit(MON_IN_USE, &mon_in_use)) 286 if (test_and_set_bit(MON_IN_USE, &mon_in_use))
289 goto out; 287 goto out;
@@ -321,7 +319,6 @@ static int mon_open(struct inode *inode, struct file *filp)
321 } 319 }
322 filp->private_data = monpriv; 320 filp->private_data = monpriv;
323 dev_set_drvdata(monreader_device, monpriv); 321 dev_set_drvdata(monreader_device, monpriv);
324 unlock_kernel();
325 return nonseekable_open(inode, filp); 322 return nonseekable_open(inode, filp);
326 323
327out_path: 324out_path:
@@ -331,7 +328,6 @@ out_priv:
331out_use: 328out_use:
332 clear_bit(MON_IN_USE, &mon_in_use); 329 clear_bit(MON_IN_USE, &mon_in_use);
333out: 330out:
334 unlock_kernel();
335 return rc; 331 return rc;
336} 332}
337 333
@@ -357,6 +353,7 @@ static int mon_close(struct inode *inode, struct file *filp)
357 atomic_set(&monpriv->msglim_count, 0); 353 atomic_set(&monpriv->msglim_count, 0);
358 monpriv->write_index = 0; 354 monpriv->write_index = 0;
359 monpriv->read_index = 0; 355 monpriv->read_index = 0;
356 dev_set_drvdata(monreader_device, NULL);
360 357
361 for (i = 0; i < MON_MSGLIM; i++) 358 for (i = 0; i < MON_MSGLIM; i++)
362 kfree(monpriv->msg_array[i]); 359 kfree(monpriv->msg_array[i]);
@@ -532,7 +529,7 @@ static int monreader_restore(struct device *dev)
532 return monreader_thaw(dev); 529 return monreader_thaw(dev);
533} 530}
534 531
535static struct dev_pm_ops monreader_pm_ops = { 532static const struct dev_pm_ops monreader_pm_ops = {
536 .freeze = monreader_freeze, 533 .freeze = monreader_freeze,
537 .thaw = monreader_thaw, 534 .thaw = monreader_thaw,
538 .restore = monreader_restore, 535 .restore = monreader_restore,
@@ -606,6 +603,10 @@ static int __init mon_init(void)
606 } 603 }
607 dcss_mkname(mon_dcss_name, &user_data_connect[8]); 604 dcss_mkname(mon_dcss_name, &user_data_connect[8]);
608 605
606 /*
607 * misc_register() has to be the last action in module_init(), because
608 * file operations will be available right after this.
609 */
609 rc = misc_register(&mon_dev); 610 rc = misc_register(&mon_dev);
610 if (rc < 0 ) 611 if (rc < 0 )
611 goto out; 612 goto out;
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index 66fb8eba93f4..668a0579b26b 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -13,7 +13,6 @@
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/smp_lock.h>
17#include <linux/types.h> 16#include <linux/types.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
@@ -185,13 +184,11 @@ static int monwrite_open(struct inode *inode, struct file *filp)
185 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); 184 monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
186 if (!monpriv) 185 if (!monpriv)
187 return -ENOMEM; 186 return -ENOMEM;
188 lock_kernel();
189 INIT_LIST_HEAD(&monpriv->list); 187 INIT_LIST_HEAD(&monpriv->list);
190 monpriv->hdr_to_read = sizeof(monpriv->hdr); 188 monpriv->hdr_to_read = sizeof(monpriv->hdr);
191 mutex_init(&monpriv->thread_mutex); 189 mutex_init(&monpriv->thread_mutex);
192 filp->private_data = monpriv; 190 filp->private_data = monpriv;
193 list_add_tail(&monpriv->priv_list, &mon_priv_list); 191 list_add_tail(&monpriv->priv_list, &mon_priv_list);
194 unlock_kernel();
195 return nonseekable_open(inode, filp); 192 return nonseekable_open(inode, filp);
196} 193}
197 194
@@ -326,7 +323,7 @@ static int monwriter_thaw(struct device *dev)
326 return monwriter_restore(dev); 323 return monwriter_restore(dev);
327} 324}
328 325
329static struct dev_pm_ops monwriter_pm_ops = { 326static const struct dev_pm_ops monwriter_pm_ops = {
330 .freeze = monwriter_freeze, 327 .freeze = monwriter_freeze,
331 .thaw = monwriter_thaw, 328 .thaw = monwriter_thaw,
332 .restore = monwriter_restore, 329 .restore = monwriter_restore,
@@ -364,6 +361,10 @@ static int __init mon_init(void)
364 goto out_driver; 361 goto out_driver;
365 } 362 }
366 363
364 /*
365 * misc_register() has to be the last action in module_init(), because
366 * file operations will be available right after this.
367 */
367 rc = misc_register(&mon_dev); 368 rc = misc_register(&mon_dev);
368 if (rc) 369 if (rc)
369 goto out_device; 370 goto out_device;
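
Both monitor drivers gain the same comment for the same reason: misc_register() publishes the misc device, so open() can run the moment it returns, and every resource the file operations rely on must already exist. As a schematic init order (not the drivers' literal code):

        static int __init mon_init(void)
        {
                int rc;

                /* 1. register drivers, set up paths, allocate state ... */

                /* 2. only then expose the device node to user space */
                rc = misc_register(&mon_dev);
                if (rc < 0)
                        goto out;
                return 0;
        out:
                /* unwind step 1 */
                return rc;
        }
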
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index d6a022f55e92..62ddf5202b79 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1361,11 +1361,13 @@ static int raw3270_pm_start(struct ccw_device *cdev)
1361 1361
1362void raw3270_pm_unfreeze(struct raw3270_view *view) 1362void raw3270_pm_unfreeze(struct raw3270_view *view)
1363{ 1363{
1364#ifdef CONFIG_TN3270_CONSOLE
1364 struct raw3270 *rp; 1365 struct raw3270 *rp;
1365 1366
1366 rp = view->dev; 1367 rp = view->dev;
1367 if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) 1368 if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags))
1368 ccw_device_force_console(); 1369 ccw_device_force_console();
1370#endif
1369} 1371}
1370 1372
1371static struct ccw_device_id raw3270_id[] = { 1373static struct ccw_device_id raw3270_id[] = {
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a983f5086788..ec88c59842e3 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1019,7 +1019,7 @@ static int sclp_restore(struct device *dev)
1019 return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); 1019 return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
1020} 1020}
1021 1021
1022static struct dev_pm_ops sclp_pm_ops = { 1022static const struct dev_pm_ops sclp_pm_ops = {
1023 .freeze = sclp_freeze, 1023 .freeze = sclp_freeze,
1024 .thaw = sclp_thaw, 1024 .thaw = sclp_thaw,
1025 .restore = sclp_restore, 1025 .restore = sclp_restore,
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index daaec185ed36..740fe405c395 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb;
26static int sclp_async_send_wait(char *message); 26static int sclp_async_send_wait(char *message);
27static struct ctl_table_header *callhome_sysctl_header; 27static struct ctl_table_header *callhome_sysctl_header;
28static DEFINE_SPINLOCK(sclp_async_lock); 28static DEFINE_SPINLOCK(sclp_async_lock);
29static char nodename[64];
30#define SCLP_NORMAL_WRITE 0x00 29#define SCLP_NORMAL_WRITE 0x00
31 30
32struct async_evbuf { 31struct async_evbuf {
@@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = {
52static int call_home_on_panic(struct notifier_block *self, 51static int call_home_on_panic(struct notifier_block *self,
53 unsigned long event, void *data) 52 unsigned long event, void *data)
54{ 53{
55 strncat(data, nodename, strlen(nodename)); 54 strncat(data, init_utsname()->nodename,
56 sclp_async_send_wait(data); 55 sizeof(init_utsname()->nodename));
57 return NOTIFY_DONE; 56 sclp_async_send_wait(data);
57 return NOTIFY_DONE;
58} 58}
59 59
60static struct notifier_block call_home_panic_nb = { 60static struct notifier_block call_home_panic_nb = {
@@ -62,21 +62,20 @@ static struct notifier_block call_home_panic_nb = {
62 .priority = INT_MAX, 62 .priority = INT_MAX,
63}; 63};
64 64
65static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp, 65static int proc_handler_callhome(struct ctl_table *ctl, int write,
66 void __user *buffer, size_t *count, 66 void __user *buffer, size_t *count,
67 loff_t *ppos) 67 loff_t *ppos)
68{ 68{
69 unsigned long val; 69 unsigned long val;
70 int len, rc; 70 int len, rc;
71 char buf[2]; 71 char buf[3];
72 72
73 if (!*count | (*ppos && !write)) { 73 if (!*count || (*ppos && !write)) {
74 *count = 0; 74 *count = 0;
75 return 0; 75 return 0;
76 } 76 }
77 if (!write) { 77 if (!write) {
78 len = sprintf(buf, "%d\n", callhome_enabled); 78 len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
79 buf[len] = '\0';
80 rc = copy_to_user(buffer, buf, sizeof(buf)); 79 rc = copy_to_user(buffer, buf, sizeof(buf));
81 if (rc != 0) 80 if (rc != 0)
82 return -EFAULT; 81 return -EFAULT;
@@ -100,20 +99,19 @@ static struct ctl_table callhome_table[] = {
100 { 99 {
101 .procname = "callhome", 100 .procname = "callhome",
102 .mode = 0644, 101 .mode = 0644,
103 .proc_handler = &proc_handler_callhome, 102 .proc_handler = proc_handler_callhome,
104 }, 103 },
105 { .ctl_name = 0 } 104 {}
106}; 105};
107 106
108static struct ctl_table kern_dir_table[] = { 107static struct ctl_table kern_dir_table[] = {
109 { 108 {
110 .ctl_name = CTL_KERN,
111 .procname = "kernel", 109 .procname = "kernel",
112 .maxlen = 0, 110 .maxlen = 0,
113 .mode = 0555, 111 .mode = 0555,
114 .child = callhome_table, 112 .child = callhome_table,
115 }, 113 },
116 { .ctl_name = 0 } 114 {}
117}; 115};
118 116
119/* 117/*
@@ -171,39 +169,29 @@ static int __init sclp_async_init(void)
171 rc = sclp_register(&sclp_async_register); 169 rc = sclp_register(&sclp_async_register);
172 if (rc) 170 if (rc)
173 return rc; 171 return rc;
174 callhome_sysctl_header = register_sysctl_table(kern_dir_table); 172 rc = -EOPNOTSUPP;
175 if (!callhome_sysctl_header) { 173 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
176 rc = -ENOMEM;
177 goto out_sclp;
178 }
179 if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) {
180 rc = -EOPNOTSUPP;
181 goto out_sclp; 174 goto out_sclp;
182 }
183 rc = -ENOMEM; 175 rc = -ENOMEM;
176 callhome_sysctl_header = register_sysctl_table(kern_dir_table);
177 if (!callhome_sysctl_header)
178 goto out_sclp;
184 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); 179 request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
185 if (!request)
186 goto out_sys;
187 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 180 sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
188 if (!sccb) 181 if (!request || !sccb)
189 goto out_mem; 182 goto out_mem;
190 rc = atomic_notifier_chain_register(&panic_notifier_list, 183 rc = atomic_notifier_chain_register(&panic_notifier_list,
191 &call_home_panic_nb); 184 &call_home_panic_nb);
192 if (rc) 185 if (!rc)
193 goto out_mem; 186 goto out;
194
195 strncpy(nodename, init_utsname()->nodename, 64);
196 return 0;
197
198out_mem: 187out_mem:
199 kfree(request); 188 kfree(request);
200 free_page((unsigned long) sccb); 189 free_page((unsigned long) sccb);
201out_sys:
202 unregister_sysctl_table(callhome_sysctl_header); 190 unregister_sysctl_table(callhome_sysctl_header);
203out_sclp: 191out_sclp:
204 sclp_unregister(&sclp_async_register); 192 sclp_unregister(&sclp_async_register);
193out:
205 return rc; 194 return rc;
206
207} 195}
208module_init(sclp_async_init); 196module_init(sclp_async_init);
209 197
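
Besides the tidier error unwinding, two genuine bugs die in proc_handler_callhome(): the bitwise | in the early-exit test becomes the intended logical ||, and the read path stops overflowing its buffer, since printing "0\n" or "1\n" writes three bytes including the terminating NUL into what used to be char buf[2]. The fixed read path, as in the hunk above:

        char buf[3];                            /* "0\n" or "1\n" plus NUL */

        if (!*count || (*ppos && !write)) {     /* logical ||, not bitwise | */
                *count = 0;
                return 0;
        }
        if (!write) {
                len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
                rc = copy_to_user(buffer, buf, sizeof(buf));
                if (rc != 0)
                        return -EFAULT;
        }
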
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 5cc11c636d38..b3beab610da4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -84,6 +84,7 @@ static void __init sclp_read_info_early(void)
84 do { 84 do {
85 memset(sccb, 0, sizeof(*sccb)); 85 memset(sccb, 0, sizeof(*sccb));
86 sccb->header.length = sizeof(*sccb); 86 sccb->header.length = sizeof(*sccb);
87 sccb->header.function_code = 0x80;
87 sccb->header.control_mask[2] = 0x80; 88 sccb->header.control_mask[2] = 0x80;
88 rc = sclp_cmd_sync_early(commands[i], sccb); 89 rc = sclp_cmd_sync_early(commands[i], sccb);
89 } while (rc == -EBUSY); 90 } while (rc == -EBUSY);
@@ -546,7 +547,7 @@ struct read_storage_sccb {
546 u32 entries[0]; 547 u32 entries[0];
547} __packed; 548} __packed;
548 549
549static struct dev_pm_ops sclp_mem_pm_ops = { 550static const struct dev_pm_ops sclp_mem_pm_ops = {
550 .freeze = sclp_mem_freeze, 551 .freeze = sclp_mem_freeze,
551}; 552};
552 553
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 84c191c1cd62..05909a7df8b3 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -20,9 +20,12 @@
20 20
21#include "sclp.h" 21#include "sclp.h"
22 22
23static void (*old_machine_restart)(char *);
24static void (*old_machine_halt)(void);
25static void (*old_machine_power_off)(void);
26
23/* Shutdown handler. Signal completion of shutdown by loading special PSW. */ 27/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
24static void 28static void do_machine_quiesce(void)
25do_machine_quiesce(void)
26{ 29{
27 psw_t quiesce_psw; 30 psw_t quiesce_psw;
28 31
@@ -33,23 +36,48 @@ do_machine_quiesce(void)
33} 36}
34 37
35/* Handler for quiesce event. Start shutdown procedure. */ 38/* Handler for quiesce event. Start shutdown procedure. */
36static void 39static void sclp_quiesce_handler(struct evbuf_header *evbuf)
37sclp_quiesce_handler(struct evbuf_header *evbuf)
38{ 40{
39 _machine_restart = (void *) do_machine_quiesce; 41 if (_machine_restart != (void *) do_machine_quiesce) {
40 _machine_halt = do_machine_quiesce; 42 old_machine_restart = _machine_restart;
41 _machine_power_off = do_machine_quiesce; 43 old_machine_halt = _machine_halt;
44 old_machine_power_off = _machine_power_off;
45 _machine_restart = (void *) do_machine_quiesce;
46 _machine_halt = do_machine_quiesce;
47 _machine_power_off = do_machine_quiesce;
48 }
42 ctrl_alt_del(); 49 ctrl_alt_del();
43} 50}
44 51
52/* Undo machine restart/halt/power_off modification on resume */
53static void sclp_quiesce_pm_event(struct sclp_register *reg,
54 enum sclp_pm_event sclp_pm_event)
55{
56 switch (sclp_pm_event) {
57 case SCLP_PM_EVENT_RESTORE:
58 if (old_machine_restart) {
59 _machine_restart = old_machine_restart;
60 _machine_halt = old_machine_halt;
61 _machine_power_off = old_machine_power_off;
62 old_machine_restart = NULL;
63 old_machine_halt = NULL;
64 old_machine_power_off = NULL;
65 }
66 break;
67 case SCLP_PM_EVENT_FREEZE:
68 case SCLP_PM_EVENT_THAW:
69 break;
70 }
71}
72
45static struct sclp_register sclp_quiesce_event = { 73static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EVTYP_SIGQUIESCE_MASK, 74 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 75 .receiver_fn = sclp_quiesce_handler,
76 .pm_event_fn = sclp_quiesce_pm_event
48}; 77};
49 78
50/* Initialize quiesce driver. */ 79/* Initialize quiesce driver. */
51static int __init 80static int __init sclp_quiesce_init(void)
52sclp_quiesce_init(void)
53{ 81{
54 return sclp_register(&sclp_quiesce_event); 82 return sclp_register(&sclp_quiesce_event);
55} 83}
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 178724f2a4c3..b9d2a007e93b 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -705,21 +705,6 @@ out_driver:
705} 705}
706__initcall(sclp_vt220_tty_init); 706__initcall(sclp_vt220_tty_init);
707 707
708#ifdef CONFIG_SCLP_VT220_CONSOLE
709
710static void
711sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
712{
713 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
714}
715
716static struct tty_driver *
717sclp_vt220_con_device(struct console *c, int *index)
718{
719 *index = 0;
720 return sclp_vt220_driver;
721}
722
723static void __sclp_vt220_flush_buffer(void) 708static void __sclp_vt220_flush_buffer(void)
724{ 709{
725 unsigned long flags; 710 unsigned long flags;
@@ -776,6 +761,21 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
776 } 761 }
777} 762}
778 763
764#ifdef CONFIG_SCLP_VT220_CONSOLE
765
766static void
767sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
768{
769 __sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
770}
771
772static struct tty_driver *
773sclp_vt220_con_device(struct console *c, int *index)
774{
775 *index = 0;
776 return sclp_vt220_driver;
777}
778
779static int 779static int
780sclp_vt220_notify(struct notifier_block *self, 780sclp_vt220_notify(struct notifier_block *self,
781 unsigned long event, void *data) 781 unsigned long event, void *data)
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index a26333774701..7a242f073632 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -212,6 +212,9 @@ struct tape_device {
212 struct tape_class_device * nt; 212 struct tape_class_device * nt;
213 struct tape_class_device * rt; 213 struct tape_class_device * rt;
214 214
215 /* Device mutex to serialize tape commands. */
216 struct mutex mutex;
217
215 /* Device discipline information. */ 218 /* Device discipline information. */
216 struct tape_discipline * discipline; 219 struct tape_discipline * discipline;
217 void * discdata; 220 void * discdata;
@@ -292,9 +295,9 @@ extern int tape_generic_pm_suspend(struct ccw_device *);
292extern int tape_generic_probe(struct ccw_device *); 295extern int tape_generic_probe(struct ccw_device *);
293extern void tape_generic_remove(struct ccw_device *); 296extern void tape_generic_remove(struct ccw_device *);
294 297
295extern struct tape_device *tape_get_device(int devindex); 298extern struct tape_device *tape_find_device(int devindex);
296extern struct tape_device *tape_get_device_reference(struct tape_device *); 299extern struct tape_device *tape_get_device(struct tape_device *);
297extern struct tape_device *tape_put_device(struct tape_device *); 300extern void tape_put_device(struct tape_device *);
298 301
299/* Externals from tape_char.c */ 302/* Externals from tape_char.c */
300extern int tapechar_init(void); 303extern int tapechar_init(void);
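
The tape reference counting moves to the kernel's usual get/put naming: tape_get_device(dev) bumps the refcount and returns dev, tape_put_device(dev) drops it and frees at zero (see the tape_core.c hunks below), and lookup by minor index becomes tape_find_device(). Call sites migrate from the old clear-on-put idiom accordingly:

        /* Old idiom: tape_put_device() returned NULL to clear the pointer. */
        request->device = tape_put_device(request->device);

        /* New idiom: clear the pointer explicitly, then drop the reference. */
        request->device = NULL;
        tape_put_device(device);
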
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 2fe45ff77b75..3657fe103c27 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -113,16 +113,16 @@ tape_34xx_work_handler(struct work_struct *work)
113{ 113{
114 struct tape_34xx_work *p = 114 struct tape_34xx_work *p =
115 container_of(work, struct tape_34xx_work, work); 115 container_of(work, struct tape_34xx_work, work);
116 struct tape_device *device = p->device;
116 117
117 switch(p->op) { 118 switch(p->op) {
118 case TO_MSEN: 119 case TO_MSEN:
119 tape_34xx_medium_sense(p->device); 120 tape_34xx_medium_sense(device);
120 break; 121 break;
121 default: 122 default:
122 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 123 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
123 } 124 }
124 125 tape_put_device(device);
125 p->device = tape_put_device(p->device);
126 kfree(p); 126 kfree(p);
127} 127}
128 128
@@ -136,7 +136,7 @@ tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
136 136
137 INIT_WORK(&p->work, tape_34xx_work_handler); 137 INIT_WORK(&p->work, tape_34xx_work_handler);
138 138
139 p->device = tape_get_device_reference(device); 139 p->device = tape_get_device(device);
140 p->op = op; 140 p->op = op;
141 141
142 schedule_work(&p->work); 142 schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index e4cc3aae9162..0c72aadb8391 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -608,7 +608,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
608 608
609 INIT_WORK(&p->work, tape_3590_work_handler); 609 INIT_WORK(&p->work, tape_3590_work_handler);
610 610
611 p->device = tape_get_device_reference(device); 611 p->device = tape_get_device(device);
612 p->op = op; 612 p->op = op;
613 613
614 schedule_work(&p->work); 614 schedule_work(&p->work);
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 64f57ef2763c..4799cc2f73c3 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -54,7 +54,7 @@ static const struct block_device_operations tapeblock_fops = {
54 .owner = THIS_MODULE, 54 .owner = THIS_MODULE,
55 .open = tapeblock_open, 55 .open = tapeblock_open,
56 .release = tapeblock_release, 56 .release = tapeblock_release,
57 .locked_ioctl = tapeblock_ioctl, 57 .ioctl = tapeblock_ioctl,
58 .media_changed = tapeblock_medium_changed, 58 .media_changed = tapeblock_medium_changed,
59 .revalidate_disk = tapeblock_revalidate_disk, 59 .revalidate_disk = tapeblock_revalidate_disk,
60}; 60};
@@ -162,9 +162,10 @@ tapeblock_requeue(struct work_struct *work) {
162 spin_lock_irq(&device->blk_data.request_queue_lock); 162 spin_lock_irq(&device->blk_data.request_queue_lock);
163 while ( 163 while (
164 !blk_queue_plugged(queue) && 164 !blk_queue_plugged(queue) &&
165 (req = blk_fetch_request(queue)) && 165 blk_peek_request(queue) &&
166 nr_queued < TAPEBLOCK_MIN_REQUEUE 166 nr_queued < TAPEBLOCK_MIN_REQUEUE
167 ) { 167 ) {
168 req = blk_fetch_request(queue);
168 if (rq_data_dir(req) == WRITE) { 169 if (rq_data_dir(req) == WRITE) {
169 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 170 DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
170 spin_unlock_irq(&device->blk_data.request_queue_lock); 171 spin_unlock_irq(&device->blk_data.request_queue_lock);
@@ -238,7 +239,7 @@ tapeblock_setup_device(struct tape_device * device)
238 disk->major = tapeblock_major; 239 disk->major = tapeblock_major;
239 disk->first_minor = device->first_minor; 240 disk->first_minor = device->first_minor;
240 disk->fops = &tapeblock_fops; 241 disk->fops = &tapeblock_fops;
241 disk->private_data = tape_get_device_reference(device); 242 disk->private_data = tape_get_device(device);
242 disk->queue = blkdat->request_queue; 243 disk->queue = blkdat->request_queue;
243 set_capacity(disk, 0); 244 set_capacity(disk, 0);
244 sprintf(disk->disk_name, "btibm%d", 245 sprintf(disk->disk_name, "btibm%d",
@@ -246,11 +247,11 @@ tapeblock_setup_device(struct tape_device * device)
246 247
247 blkdat->disk = disk; 248 blkdat->disk = disk;
248 blkdat->medium_changed = 1; 249 blkdat->medium_changed = 1;
249 blkdat->request_queue->queuedata = tape_get_device_reference(device); 250 blkdat->request_queue->queuedata = tape_get_device(device);
250 251
251 add_disk(disk); 252 add_disk(disk);
252 253
253 tape_get_device_reference(device); 254 tape_get_device(device);
254 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue); 255 INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
255 256
256 return 0; 257 return 0;
@@ -273,13 +274,14 @@ tapeblock_cleanup_device(struct tape_device *device)
273 } 274 }
274 275
275 del_gendisk(device->blk_data.disk); 276 del_gendisk(device->blk_data.disk);
276 device->blk_data.disk->private_data = 277 device->blk_data.disk->private_data = NULL;
277 tape_put_device(device->blk_data.disk->private_data); 278 tape_put_device(device);
278 put_disk(device->blk_data.disk); 279 put_disk(device->blk_data.disk);
279 280
280 device->blk_data.disk = NULL; 281 device->blk_data.disk = NULL;
281cleanup_queue: 282cleanup_queue:
282 device->blk_data.request_queue->queuedata = tape_put_device(device); 283 device->blk_data.request_queue->queuedata = NULL;
284 tape_put_device(device);
283 285
284 blk_cleanup_queue(device->blk_data.request_queue); 286 blk_cleanup_queue(device->blk_data.request_queue);
285 device->blk_data.request_queue = NULL; 287 device->blk_data.request_queue = NULL;
@@ -362,7 +364,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode)
362 struct tape_device * device; 364 struct tape_device * device;
363 int rc; 365 int rc;
364 366
365 device = tape_get_device_reference(disk->private_data); 367 device = tape_get_device(disk->private_data);
366 368
367 if (device->required_tapemarks) { 369 if (device->required_tapemarks) {
368 DBF_EVENT(2, "TBLOCK: missing tapemarks\n"); 370 DBF_EVENT(2, "TBLOCK: missing tapemarks\n");
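
The requeue loop fixes a subtle request leak: blk_fetch_request() dequeues, so when the old loop condition fetched a request and then failed the nr_queued limit test, that request fell off the queue unprocessed. Peeking first keeps it queued until every condition has passed:

        while (!blk_queue_plugged(queue) &&
               blk_peek_request(queue) &&       /* request available? */
               nr_queued < TAPEBLOCK_MIN_REQUEUE) {
                req = blk_fetch_request(queue); /* dequeue only now */
                /* ... submit or reject req ... */
        }
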
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31566c55adfe..23d773a0d113 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -33,8 +33,7 @@ static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
33static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *); 33static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
34static int tapechar_open(struct inode *,struct file *); 34static int tapechar_open(struct inode *,struct file *);
35static int tapechar_release(struct inode *,struct file *); 35static int tapechar_release(struct inode *,struct file *);
36static int tapechar_ioctl(struct inode *, struct file *, unsigned int, 36static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
37 unsigned long);
38static long tapechar_compat_ioctl(struct file *, unsigned int, 37static long tapechar_compat_ioctl(struct file *, unsigned int,
39 unsigned long); 38 unsigned long);
40 39
@@ -43,7 +42,7 @@ static const struct file_operations tape_fops =
43 .owner = THIS_MODULE, 42 .owner = THIS_MODULE,
44 .read = tapechar_read, 43 .read = tapechar_read,
45 .write = tapechar_write, 44 .write = tapechar_write,
46 .ioctl = tapechar_ioctl, 45 .unlocked_ioctl = tapechar_ioctl,
47 .compat_ioctl = tapechar_compat_ioctl, 46 .compat_ioctl = tapechar_compat_ioctl,
48 .open = tapechar_open, 47 .open = tapechar_open,
49 .release = tapechar_release, 48 .release = tapechar_release,
@@ -170,7 +169,6 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
170 if (rc == 0) { 169 if (rc == 0) {
171 rc = block_size - request->rescnt; 170 rc = block_size - request->rescnt;
172 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc); 171 DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
173 filp->f_pos += rc;
174 /* Copy data from idal buffer to user space. */ 172 /* Copy data from idal buffer to user space. */
175 if (idal_buffer_to_user(device->char_data.idal_buf, 173 if (idal_buffer_to_user(device->char_data.idal_buf,
176 data, rc) != 0) 174 data, rc) != 0)
@@ -238,7 +236,6 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
238 break; 236 break;
239 DBF_EVENT(6, "TCHAR:wbytes: %lx\n", 237 DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
240 block_size - request->rescnt); 238 block_size - request->rescnt);
241 filp->f_pos += block_size - request->rescnt;
242 written += block_size - request->rescnt; 239 written += block_size - request->rescnt;
243 if (request->rescnt != 0) 240 if (request->rescnt != 0)
244 break; 241 break;
@@ -286,26 +283,20 @@ tapechar_open (struct inode *inode, struct file *filp)
286 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major) 283 if (imajor(filp->f_path.dentry->d_inode) != tapechar_major)
287 return -ENODEV; 284 return -ENODEV;
288 285
289 lock_kernel();
290 minor = iminor(filp->f_path.dentry->d_inode); 286 minor = iminor(filp->f_path.dentry->d_inode);
291 device = tape_get_device(minor / TAPE_MINORS_PER_DEV); 287 device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
292 if (IS_ERR(device)) { 288 if (IS_ERR(device)) {
293 DBF_EVENT(3, "TCHAR:open: tape_get_device() failed\n"); 289 DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
294 rc = PTR_ERR(device); 290 return PTR_ERR(device);
295 goto out;
296 } 291 }
297 292
298
299 rc = tape_open(device); 293 rc = tape_open(device);
300 if (rc == 0) { 294 if (rc == 0) {
301 filp->private_data = device; 295 filp->private_data = device;
302 rc = nonseekable_open(inode, filp); 296 nonseekable_open(inode, filp);
303 } 297 } else
304 else
305 tape_put_device(device); 298 tape_put_device(device);
306 299
307out:
308 unlock_kernel();
309 return rc; 300 return rc;
310} 301}
311 302
@@ -342,7 +333,8 @@ tapechar_release(struct inode *inode, struct file *filp)
342 device->char_data.idal_buf = NULL; 333 device->char_data.idal_buf = NULL;
343 } 334 }
344 tape_release(device); 335 tape_release(device);
345 filp->private_data = tape_put_device(device); 336 filp->private_data = NULL;
337 tape_put_device(device);
346 338
347 return 0; 339 return 0;
348} 340}
@@ -351,16 +343,11 @@ tapechar_release(struct inode *inode, struct file *filp)
351 * Tape device io controls. 343 * Tape device io controls.
352 */ 344 */
353static int 345static int
354tapechar_ioctl(struct inode *inp, struct file *filp, 346__tapechar_ioctl(struct tape_device *device,
355 unsigned int no, unsigned long data) 347 unsigned int no, unsigned long data)
356{ 348{
357 struct tape_device *device;
358 int rc; 349 int rc;
359 350
360 DBF_EVENT(6, "TCHAR:ioct\n");
361
362 device = (struct tape_device *) filp->private_data;
363
364 if (no == MTIOCTOP) { 351 if (no == MTIOCTOP) {
365 struct mtop op; 352 struct mtop op;
366 353
@@ -453,15 +440,30 @@ tapechar_ioctl(struct inode *inp, struct file *filp,
453} 440}
454 441
455static long 442static long
443tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
444{
445 struct tape_device *device;
446 long rc;
447
448 DBF_EVENT(6, "TCHAR:ioct\n");
449
450 device = (struct tape_device *) filp->private_data;
451 mutex_lock(&device->mutex);
452 rc = __tapechar_ioctl(device, no, data);
453 mutex_unlock(&device->mutex);
454 return rc;
455}
456
457static long
456tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data) 458tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
457{ 459{
458 struct tape_device *device = filp->private_data; 460 struct tape_device *device = filp->private_data;
459 int rval = -ENOIOCTLCMD; 461 int rval = -ENOIOCTLCMD;
460 462
461 if (device->discipline->ioctl_fn) { 463 if (device->discipline->ioctl_fn) {
462 lock_kernel(); 464 mutex_lock(&device->mutex);
463 rval = device->discipline->ioctl_fn(device, no, data); 465 rval = device->discipline->ioctl_fn(device, no, data);
464 unlock_kernel(); 466 mutex_unlock(&device->mutex);
465 if (rval == -EINVAL) 467 if (rval == -EINVAL)
466 rval = -ENOIOCTLCMD; 468 rval = -ENOIOCTLCMD;
467 } 469 }
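
tape_char drops both the BKL and the old .ioctl entry point in one go: the command handling moves into __tapechar_ioctl(), and the new unlocked_ioctl wrapper serializes on the per-device mutex introduced in tape.h, the same lock the compat path now takes in place of lock_kernel(). The wrapper, reduced to its locking skeleton:

        static long tapechar_ioctl(struct file *filp, unsigned int no,
                                   unsigned long data)
        {
                struct tape_device *device = filp->private_data;
                long rc;

                mutex_lock(&device->mutex);     /* replaces the BKL */
                rc = __tapechar_ioctl(device, no, data);
                mutex_unlock(&device->mutex);
                return rc;
        }
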
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5cd31e071647..f5d6802dc5da 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -492,6 +492,7 @@ tape_alloc_device(void)
492 kfree(device); 492 kfree(device);
493 return ERR_PTR(-ENOMEM); 493 return ERR_PTR(-ENOMEM);
494 } 494 }
495 mutex_init(&device->mutex);
495 INIT_LIST_HEAD(&device->req_queue); 496 INIT_LIST_HEAD(&device->req_queue);
496 INIT_LIST_HEAD(&device->node); 497 INIT_LIST_HEAD(&device->node);
497 init_waitqueue_head(&device->state_change_wq); 498 init_waitqueue_head(&device->state_change_wq);
@@ -511,11 +512,12 @@ tape_alloc_device(void)
511 * increment the reference count. 512 * increment the reference count.
512 */ 513 */
513struct tape_device * 514struct tape_device *
514tape_get_device_reference(struct tape_device *device) 515tape_get_device(struct tape_device *device)
515{ 516{
516 DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device, 517 int count;
517 atomic_inc_return(&device->ref_count));
518 518
519 count = atomic_inc_return(&device->ref_count);
520 DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
519 return device; 521 return device;
520} 522}
521 523
@@ -525,32 +527,25 @@ tape_get_device_reference(struct tape_device *device)
525 * The function returns a NULL pointer to be used by the caller 527 * The function returns a NULL pointer to be used by the caller
526 * for clearing reference pointers. 528 * for clearing reference pointers.
527 */ 529 */
528struct tape_device * 530void
529tape_put_device(struct tape_device *device) 531tape_put_device(struct tape_device *device)
530{ 532{
531 int remain; 533 int count;
532 534
533 remain = atomic_dec_return(&device->ref_count); 535 count = atomic_dec_return(&device->ref_count);
534 if (remain > 0) { 536 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
535 DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain); 537 BUG_ON(count < 0);
536 } else { 538 if (count == 0) {
537 if (remain < 0) { 539 kfree(device->modeset_byte);
538 DBF_EVENT(4, "put device without reference\n"); 540 kfree(device);
539 } else {
540 DBF_EVENT(4, "tape_free_device(%p)\n", device);
541 kfree(device->modeset_byte);
542 kfree(device);
543 }
544 } 541 }
545
546 return NULL;
547} 542}
548 543
549/* 544/*
550 * Find tape device by a device index. 545 * Find tape device by a device index.
551 */ 546 */
552struct tape_device * 547struct tape_device *
553tape_get_device(int devindex) 548tape_find_device(int devindex)
554{ 549{
555 struct tape_device *device, *tmp; 550 struct tape_device *device, *tmp;
556 551
@@ -558,7 +553,7 @@ tape_get_device(int devindex)
558 read_lock(&tape_device_lock); 553 read_lock(&tape_device_lock);
559 list_for_each_entry(tmp, &tape_device_list, node) { 554 list_for_each_entry(tmp, &tape_device_list, node) {
560 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) { 555 if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
561 device = tape_get_device_reference(tmp); 556 device = tape_get_device(tmp);
562 break; 557 break;
563 } 558 }
564 } 559 }
@@ -579,7 +574,8 @@ tape_generic_probe(struct ccw_device *cdev)
579 device = tape_alloc_device(); 574 device = tape_alloc_device();
580 if (IS_ERR(device)) 575 if (IS_ERR(device))
581 return -ENODEV; 576 return -ENODEV;
582 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 577 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
578 CCWDEV_DO_MULTIPATH);
583 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); 579 ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
584 if (ret) { 580 if (ret) {
585 tape_put_device(device); 581 tape_put_device(device);
@@ -606,7 +602,8 @@ __tape_discard_requests(struct tape_device *device)
606 list_del(&request->list); 602 list_del(&request->list);
607 603
608 /* Decrease ref_count for removed request. */ 604 /* Decrease ref_count for removed request. */
609 request->device = tape_put_device(device); 605 request->device = NULL;
606 tape_put_device(device);
610 request->rc = -EIO; 607 request->rc = -EIO;
611 if (request->callback != NULL) 608 if (request->callback != NULL)
612 request->callback(request, request->callback_data); 609 request->callback(request, request->callback_data);
@@ -664,9 +661,11 @@ tape_generic_remove(struct ccw_device *cdev)
664 tape_cleanup_device(device); 661 tape_cleanup_device(device);
665 } 662 }
666 663
667 if (!dev_get_drvdata(&cdev->dev)) { 664 device = dev_get_drvdata(&cdev->dev);
665 if (device) {
668 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group); 666 sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
669 dev_set_drvdata(&cdev->dev, tape_put_device(dev_get_drvdata(&cdev->dev))); 667 dev_set_drvdata(&cdev->dev, NULL);
668 tape_put_device(device);
670 } 669 }
671} 670}
672 671
@@ -721,9 +720,8 @@ tape_free_request (struct tape_request * request)
721{ 720{
722 DBF_LH(6, "Free request %p\n", request); 721 DBF_LH(6, "Free request %p\n", request);
723 722
724 if (request->device != NULL) { 723 if (request->device)
725 request->device = tape_put_device(request->device); 724 tape_put_device(request->device);
726 }
727 kfree(request->cpdata); 725 kfree(request->cpdata);
728 kfree(request->cpaddr); 726 kfree(request->cpaddr);
729 kfree(request); 727 kfree(request);
@@ -838,7 +836,8 @@ static void tape_long_busy_timeout(unsigned long data)
838 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY); 836 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
839 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); 837 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
840 __tape_start_next_request(device); 838 __tape_start_next_request(device);
841 device->lb_timeout.data = (unsigned long) tape_put_device(device); 839 device->lb_timeout.data = 0UL;
840 tape_put_device(device);
842 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 841 spin_unlock_irq(get_ccwdev_lock(device->cdev));
843} 842}
844 843
@@ -918,7 +917,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
918 } 917 }
919 918
920 /* Increase use count of device for the added request. */ 919 /* Increase use count of device for the added request. */
921 request->device = tape_get_device_reference(device); 920 request->device = tape_get_device(device);
922 921
923 if (list_empty(&device->req_queue)) { 922 if (list_empty(&device->req_queue)) {
924 /* No other requests are on the queue. Start this one. */ 923 /* No other requests are on the queue. Start this one. */
@@ -1117,8 +1116,8 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1117 if (req->status == TAPE_REQUEST_LONG_BUSY) { 1116 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1118 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); 1117 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1119 if (del_timer(&device->lb_timeout)) { 1118 if (del_timer(&device->lb_timeout)) {
1120 device->lb_timeout.data = (unsigned long) 1119 device->lb_timeout.data = 0UL;
1121 tape_put_device(device); 1120 tape_put_device(device);
1122 __tape_start_next_request(device); 1121 __tape_start_next_request(device);
1123 } 1122 }
1124 return; 1123 return;
@@ -1173,7 +1172,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1173 break; 1172 break;
1174 case TAPE_IO_LONG_BUSY: 1173 case TAPE_IO_LONG_BUSY:
1175 device->lb_timeout.data = 1174 device->lb_timeout.data =
1176 (unsigned long)tape_get_device_reference(device); 1175 (unsigned long) tape_get_device(device);
1177 device->lb_timeout.expires = jiffies + 1176 device->lb_timeout.expires = jiffies +
1178 LONG_BUSY_TIMEOUT * HZ; 1177 LONG_BUSY_TIMEOUT * HZ;
1179 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); 1178 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
@@ -1326,7 +1325,7 @@ EXPORT_SYMBOL(tape_generic_online);
1326EXPORT_SYMBOL(tape_generic_offline); 1325EXPORT_SYMBOL(tape_generic_offline);
1327EXPORT_SYMBOL(tape_generic_pm_suspend); 1326EXPORT_SYMBOL(tape_generic_pm_suspend);
1328EXPORT_SYMBOL(tape_put_device); 1327EXPORT_SYMBOL(tape_put_device);
1329EXPORT_SYMBOL(tape_get_device_reference); 1328EXPORT_SYMBOL(tape_get_device);
1330EXPORT_SYMBOL(tape_state_verbose); 1329EXPORT_SYMBOL(tape_state_verbose);
1331EXPORT_SYMBOL(tape_op_verbose); 1330EXPORT_SYMBOL(tape_op_verbose);
1332EXPORT_SYMBOL(tape_state_set); 1331EXPORT_SYMBOL(tape_state_set);
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 202f42132939..ebd820ccfb24 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -45,7 +45,7 @@ static int tape_proc_show(struct seq_file *m, void *v)
45 seq_printf(m, "TapeNo\tBusID CuType/Model\t" 45 seq_printf(m, "TapeNo\tBusID CuType/Model\t"
46 "DevType/Model\tBlkSize\tState\tOp\tMedState\n"); 46 "DevType/Model\tBlkSize\tState\tOp\tMedState\n");
47 } 47 }
48 device = tape_get_device(n); 48 device = tape_find_device(n);
49 if (IS_ERR(device)) 49 if (IS_ERR(device))
50 return 0; 50 return 0;
51 spin_lock_irq(get_ccwdev_lock(device->cdev)); 51 spin_lock_irq(get_ccwdev_lock(device->cdev));
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 38385677c653..911822db614d 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/bootmem.h> 21#include <linux/bootmem.h>
22#include <linux/compat.h>
22 23
23#include <asm/ccwdev.h> 24#include <asm/ccwdev.h>
24#include <asm/cio.h> 25#include <asm/cio.h>
@@ -1731,6 +1732,22 @@ tty3270_ioctl(struct tty_struct *tty, struct file *file,
1731 return kbd_ioctl(tp->kbd, file, cmd, arg); 1732 return kbd_ioctl(tp->kbd, file, cmd, arg);
1732} 1733}
1733 1734
1735#ifdef CONFIG_COMPAT
1736static long
1737tty3270_compat_ioctl(struct tty_struct *tty, struct file *file,
1738 unsigned int cmd, unsigned long arg)
1739{
1740 struct tty3270 *tp;
1741
1742 tp = tty->driver_data;
1743 if (!tp)
1744 return -ENODEV;
1745 if (tty->flags & (1 << TTY_IO_ERROR))
1746 return -EIO;
1747 return kbd_ioctl(tp->kbd, file, cmd, (unsigned long)compat_ptr(arg));
1748}
1749#endif
1750
1734static const struct tty_operations tty3270_ops = { 1751static const struct tty_operations tty3270_ops = {
1735 .open = tty3270_open, 1752 .open = tty3270_open,
1736 .close = tty3270_close, 1753 .close = tty3270_close,
@@ -1745,6 +1762,9 @@ static const struct tty_operations tty3270_ops = {
1745 .hangup = tty3270_hangup, 1762 .hangup = tty3270_hangup,
1746 .wait_until_sent = tty3270_wait_until_sent, 1763 .wait_until_sent = tty3270_wait_until_sent,
1747 .ioctl = tty3270_ioctl, 1764 .ioctl = tty3270_ioctl,
1765#ifdef CONFIG_COMPAT
1766 .compat_ioctl = tty3270_compat_ioctl,
1767#endif
1748 .set_termios = tty3270_set_termios 1768 .set_termios = tty3270_set_termios
1749}; 1769};
1750 1770
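
The handler added above is the generic compat pattern for drivers whose ioctl argument is a user-space pointer: under CONFIG_COMPAT the 32-bit pointer is widened with compat_ptr() and the native path is reused. A minimal sketch of the same shape for an ordinary char device (the foo_* names are hypothetical stand-ins, not part of this patch):

    #ifdef CONFIG_COMPAT
    #include <linux/compat.h>

    static long foo_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
    {
            /* Widen the 32-bit user pointer, then reuse the native handler. */
            return foo_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
    }
    #endif

Hooking this up via .compat_ioctl, as done for tty3270_ops above, is what lets 32-bit binaries drive the device on a 64-bit kernel.
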
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d1a142fa3eb4..7dfa5412d5a8 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -312,11 +312,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
312 return -ENOSYS; 312 return -ENOSYS;
313 313
 314 /* Be sure this device hasn't already been opened */ 314
315 lock_kernel();
316 spin_lock_bh(&logptr->priv_lock); 315 spin_lock_bh(&logptr->priv_lock);
317 if (logptr->dev_in_use) { 316 if (logptr->dev_in_use) {
318 spin_unlock_bh(&logptr->priv_lock); 317 spin_unlock_bh(&logptr->priv_lock);
319 unlock_kernel();
320 return -EBUSY; 318 return -EBUSY;
321 } 319 }
322 logptr->dev_in_use = 1; 320 logptr->dev_in_use = 1;
@@ -360,9 +358,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
360 || (logptr->iucv_path_severed)); 358 || (logptr->iucv_path_severed));
361 if (logptr->iucv_path_severed) 359 if (logptr->iucv_path_severed)
362 goto out_record; 360 goto out_record;
363 ret = nonseekable_open(inode, filp); 361 nonseekable_open(inode, filp);
364 unlock_kernel(); 362 return 0;
365 return ret;
366 363
367out_record: 364out_record:
368 if (logptr->autorecording) 365 if (logptr->autorecording)
@@ -372,7 +369,6 @@ out_path:
372 logptr->path = NULL; 369 logptr->path = NULL;
373out_dev: 370out_dev:
374 logptr->dev_in_use = 0; 371 logptr->dev_in_use = 0;
375 unlock_kernel();
376 return -EIO; 372 return -EIO;
377} 373}
378 374
@@ -679,7 +675,7 @@ static int vmlogrdr_pm_prepare(struct device *dev)
679} 675}
680 676
681 677
682static struct dev_pm_ops vmlogrdr_pm_ops = { 678static const struct dev_pm_ops vmlogrdr_pm_ops = {
683 .prepare = vmlogrdr_pm_prepare, 679 .prepare = vmlogrdr_pm_prepare,
684}; 680};
685 681
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 77571b68539a..cc56fc708bae 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -695,7 +695,6 @@ static int ur_open(struct inode *inode, struct file *file)
695 695
696 if (accmode == O_RDWR) 696 if (accmode == O_RDWR)
697 return -EACCES; 697 return -EACCES;
698 lock_kernel();
699 /* 698 /*
700 * We treat the minor number as the devno of the ur device 699 * We treat the minor number as the devno of the ur device
701 * to find in the driver tree. 700 * to find in the driver tree.
@@ -749,7 +748,6 @@ static int ur_open(struct inode *inode, struct file *file)
749 goto fail_urfile_free; 748 goto fail_urfile_free;
750 urf->file_reclen = rc; 749 urf->file_reclen = rc;
751 file->private_data = urf; 750 file->private_data = urf;
752 unlock_kernel();
753 return 0; 751 return 0;
754 752
755fail_urfile_free: 753fail_urfile_free:
@@ -761,7 +759,6 @@ fail_unlock:
761fail_put: 759fail_put:
762 urdev_put(urd); 760 urdev_put(urd);
763out: 761out:
764 unlock_kernel();
765 return rc; 762 return rc;
766} 763}
767 764
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index f2bc287b69e4..c974058e48d2 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -19,7 +19,6 @@
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/suspend.h> 20#include <linux/suspend.h>
21#include <linux/watchdog.h> 21#include <linux/watchdog.h>
22#include <linux/smp_lock.h>
23 22
24#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
25#include <asm/io.h> 24#include <asm/io.h>
@@ -49,6 +48,8 @@ static unsigned int vmwdt_interval = 60;
49static unsigned long vmwdt_is_open; 48static unsigned long vmwdt_is_open;
50static int vmwdt_expect_close; 49static int vmwdt_expect_close;
51 50
51static DEFINE_MUTEX(vmwdt_mutex);
52
52#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ 53#define VMWDT_OPEN 0 /* devnode is open or suspend in progress */
53#define VMWDT_RUNNING 1 /* The watchdog is armed */ 54#define VMWDT_RUNNING 1 /* The watchdog is armed */
54 55
@@ -133,15 +134,11 @@ static int __init vmwdt_probe(void)
133static int vmwdt_open(struct inode *i, struct file *f) 134static int vmwdt_open(struct inode *i, struct file *f)
134{ 135{
135 int ret; 136 int ret;
136 lock_kernel(); 137 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
137 if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
138 unlock_kernel();
139 return -EBUSY; 138 return -EBUSY;
140 }
141 ret = vmwdt_keepalive(); 139 ret = vmwdt_keepalive();
142 if (ret) 140 if (ret)
143 clear_bit(VMWDT_OPEN, &vmwdt_is_open); 141 clear_bit(VMWDT_OPEN, &vmwdt_is_open);
144 unlock_kernel();
145 return ret ? ret : nonseekable_open(i, f); 142 return ret ? ret : nonseekable_open(i, f);
146} 143}
147 144
@@ -160,8 +157,7 @@ static struct watchdog_info vmwdt_info = {
160 .identity = "z/VM Watchdog Timer", 157 .identity = "z/VM Watchdog Timer",
161}; 158};
162 159
163static int vmwdt_ioctl(struct inode *i, struct file *f, 160static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
164 unsigned int cmd, unsigned long arg)
165{ 161{
166 switch (cmd) { 162 switch (cmd) {
167 case WDIOC_GETSUPPORT: 163 case WDIOC_GETSUPPORT:
@@ -205,10 +201,19 @@ static int vmwdt_ioctl(struct inode *i, struct file *f,
205 case WDIOC_KEEPALIVE: 201 case WDIOC_KEEPALIVE:
206 return vmwdt_keepalive(); 202 return vmwdt_keepalive();
207 } 203 }
208
209 return -EINVAL; 204 return -EINVAL;
210} 205}
211 206
207static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
208{
209 int rc;
210
211 mutex_lock(&vmwdt_mutex);
212 rc = __vmwdt_ioctl(cmd, arg);
213 mutex_unlock(&vmwdt_mutex);
214 return (long) rc;
215}
216
212static ssize_t vmwdt_write(struct file *f, const char __user *buf, 217static ssize_t vmwdt_write(struct file *f, const char __user *buf,
213 size_t count, loff_t *ppos) 218 size_t count, loff_t *ppos)
214{ 219{
@@ -288,7 +293,7 @@ static struct notifier_block vmwdt_power_notifier = {
288static const struct file_operations vmwdt_fops = { 293static const struct file_operations vmwdt_fops = {
289 .open = &vmwdt_open, 294 .open = &vmwdt_open,
290 .release = &vmwdt_close, 295 .release = &vmwdt_close,
291 .ioctl = &vmwdt_ioctl, 296 .unlocked_ioctl = &vmwdt_ioctl,
292 .write = &vmwdt_write, 297 .write = &vmwdt_write,
293 .owner = THIS_MODULE, 298 .owner = THIS_MODULE,
294}; 299};
@@ -309,6 +314,10 @@ static int __init vmwdt_init(void)
309 ret = register_pm_notifier(&vmwdt_power_notifier); 314 ret = register_pm_notifier(&vmwdt_power_notifier);
310 if (ret) 315 if (ret)
311 return ret; 316 return ret;
317 /*
318 * misc_register() has to be the last action in module_init(), because
319 * file operations will be available right after this.
320 */
312 ret = misc_register(&vmwdt_dev); 321 ret = misc_register(&vmwdt_dev);
313 if (ret) { 322 if (ret) {
314 unregister_pm_notifier(&vmwdt_power_notifier); 323 unregister_pm_notifier(&vmwdt_power_notifier);
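
With the ioctl path now serialized by vmwdt_mutex instead of the big kernel lock, the userspace contract is unchanged: the commands dispatched by __vmwdt_ioctl() are the standard ones from <linux/watchdog.h>. A minimal pinger sketch, assuming the conventional misc-device node (the path is illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            int fd = open("/dev/watchdog", O_WRONLY);

            if (fd < 0)
                    return 1;
            for (;;) {
                    ioctl(fd, WDIOC_KEEPALIVE, 0);  /* re-arm the timer */
                    sleep(30);      /* well below the 60s default interval */
            }
    }
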
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index c431198bdbc4..82daa3c1dc9c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
17#include <linux/utsname.h>
18#include <linux/debugfs.h> 17#include <linux/debugfs.h>
19#include <asm/ipl.h> 18#include <asm/ipl.h>
20#include <asm/sclp.h> 19#include <asm/sclp.h>
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65e..d033414f7599 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \ 5obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
6 fcx.o itcw.o crw.o 6 fcx.o itcw.o crw.o ccwreq.o
7ccw_device-objs += device.o device_fsm.o device_ops.o 7ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 6565f027791e..7eab9ab9f406 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -265,13 +265,11 @@ struct ccwdev_iter {
265static void * 265static void *
266cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset) 266cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
267{ 267{
268 struct ccwdev_iter *iter; 268 struct ccwdev_iter *iter = s->private;
269 269
270 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1)) 270 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
271 return NULL; 271 return NULL;
272 iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL); 272 memset(iter, 0, sizeof(*iter));
273 if (!iter)
274 return ERR_PTR(-ENOMEM);
275 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1); 273 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
276 iter->devno = *offset % (__MAX_SUBCHANNEL + 1); 274 iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
277 return iter; 275 return iter;
@@ -280,8 +278,6 @@ cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
280static void 278static void
281cio_ignore_proc_seq_stop(struct seq_file *s, void *it) 279cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
282{ 280{
283 if (!IS_ERR(it))
284 kfree(it);
285} 281}
286 282
287static void * 283static void *
@@ -378,14 +374,15 @@ static const struct seq_operations cio_ignore_proc_seq_ops = {
378static int 374static int
379cio_ignore_proc_open(struct inode *inode, struct file *file) 375cio_ignore_proc_open(struct inode *inode, struct file *file)
380{ 376{
381 return seq_open(file, &cio_ignore_proc_seq_ops); 377 return seq_open_private(file, &cio_ignore_proc_seq_ops,
378 sizeof(struct ccwdev_iter));
382} 379}
383 380
384static const struct file_operations cio_ignore_proc_fops = { 381static const struct file_operations cio_ignore_proc_fops = {
385 .open = cio_ignore_proc_open, 382 .open = cio_ignore_proc_open,
386 .read = seq_read, 383 .read = seq_read,
387 .llseek = seq_lseek, 384 .llseek = seq_lseek,
388 .release = seq_release, 385 .release = seq_release_private,
389 .write = cio_ignore_write, 386 .write = cio_ignore_write,
390}; 387};
391 388
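
The cio_ignore conversion above is the stock seq_file idiom for per-open iterator state: seq_open_private() allocates a zeroed buffer of the requested size and stores it in seq_file->private, and seq_release_private() frees it, which is why ->start() loses its kzalloc()/ERR_PTR() handling and ->stop() becomes empty. Condensed to its essentials (foo_* names hypothetical):

    static void *foo_seq_start(struct seq_file *s, loff_t *pos)
    {
            struct foo_iter *iter = s->private;     /* preallocated by open */

            if (*pos >= FOO_MAX_ENTRIES)
                    return NULL;
            memset(iter, 0, sizeof(*iter));
            iter->index = *pos;
            return iter;
    }

    static int foo_proc_open(struct inode *inode, struct file *file)
    {
            return seq_open_private(file, &foo_seq_ops,
                                    sizeof(struct foo_iter));
    }

    static const struct file_operations foo_proc_fops = {
            .open    = foo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_private,
    };
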
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index a5a62f1f7747..5f97ea2ee6b1 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -560,7 +560,7 @@ static int ccwgroup_pm_restore(struct device *dev)
560 return gdrv->restore ? gdrv->restore(gdev) : 0; 560 return gdrv->restore ? gdrv->restore(gdev) : 0;
561} 561}
562 562
563static struct dev_pm_ops ccwgroup_pm_ops = { 563static const struct dev_pm_ops ccwgroup_pm_ops = {
564 .prepare = ccwgroup_pm_prepare, 564 .prepare = ccwgroup_pm_prepare,
565 .complete = ccwgroup_pm_complete, 565 .complete = ccwgroup_pm_complete,
566 .freeze = ccwgroup_pm_freeze, 566 .freeze = ccwgroup_pm_freeze,
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..9509e3860934
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
1/*
2 * Handling of internal CCW device requests.
3 *
4 * Copyright IBM Corp. 2009
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/err.h>
10#include <asm/ccwdev.h>
11#include <asm/cio.h>
12
13#include "io_sch.h"
14#include "cio.h"
15#include "device.h"
16#include "cio_debug.h"
17
18/**
19 * lpm_adjust - adjust path mask
20 * @lpm: path mask to adjust
21 * @mask: mask of available paths
22 *
23 * Shift @lpm right until @lpm and @mask have at least one bit in common or
24 * until @lpm is zero. Return the resulting lpm.
25 */
26int lpm_adjust(int lpm, int mask)
27{
28 while (lpm && ((lpm & mask) == 0))
29 lpm >>= 1;
30 return lpm;
31}
32
33/*
34 * Adjust path mask to use next path and reset retry count. Return resulting
35 * path mask.
36 */
37static u16 ccwreq_next_path(struct ccw_device *cdev)
38{
39 struct ccw_request *req = &cdev->private->req;
40
41 req->retries = req->maxretries;
 42 req->mask = lpm_adjust(req->mask >> 1, req->lpm);
43
44 return req->mask;
45}
46
47/*
48 * Clean up device state and report to callback.
49 */
50static void ccwreq_stop(struct ccw_device *cdev, int rc)
51{
52 struct subchannel *sch = to_subchannel(cdev->dev.parent);
53 struct ccw_request *req = &cdev->private->req;
54
55 if (req->done)
56 return;
57 req->done = 1;
58 ccw_device_set_timeout(cdev, 0);
59 memset(&cdev->private->irb, 0, sizeof(struct irb));
60 sch->lpm = sch->schib.pmcw.pam;
61 if (rc && rc != -ENODEV && req->drc)
62 rc = req->drc;
63 req->callback(cdev, req->data, rc);
64}
65
66/*
67 * (Re-)Start the operation until retries and paths are exhausted.
68 */
69static void ccwreq_do(struct ccw_device *cdev)
70{
71 struct ccw_request *req = &cdev->private->req;
72 struct subchannel *sch = to_subchannel(cdev->dev.parent);
73 struct ccw1 *cp = req->cp;
74 int rc = -EACCES;
75
76 while (req->mask) {
77 if (req->retries-- == 0) {
78 /* Retries exhausted, try next path. */
79 ccwreq_next_path(cdev);
80 continue;
81 }
82 /* Perform start function. */
83 sch->lpm = 0xff;
84 memset(&cdev->private->irb, 0, sizeof(struct irb));
85 rc = cio_start(sch, cp, (u8) req->mask);
86 if (rc == 0) {
87 /* I/O started successfully. */
88 ccw_device_set_timeout(cdev, req->timeout);
89 return;
90 }
91 if (rc == -ENODEV) {
92 /* Permanent device error. */
93 break;
94 }
95 if (rc == -EACCES) {
 96 /* Permanent path error. */
97 ccwreq_next_path(cdev);
98 continue;
99 }
100 /* Temporary improper status. */
101 rc = cio_clear(sch);
102 if (rc)
103 break;
104 return;
105 }
106 ccwreq_stop(cdev, rc);
107}
108
109/**
110 * ccw_request_start - perform I/O request
111 * @cdev: ccw device
112 *
113 * Perform the I/O request specified by cdev->req.
114 */
115void ccw_request_start(struct ccw_device *cdev)
116{
117 struct ccw_request *req = &cdev->private->req;
118
119 /* Try all paths twice to counter link flapping. */
120 req->mask = 0x8080;
121 req->retries = req->maxretries;
122 req->mask = lpm_adjust(req->mask, req->lpm);
123 req->drc = 0;
124 req->done = 0;
125 req->cancel = 0;
126 if (!req->mask)
127 goto out_nopath;
128 ccwreq_do(cdev);
129 return;
130
131out_nopath:
132 ccwreq_stop(cdev, -EACCES);
133}
134
135/**
136 * ccw_request_cancel - cancel running I/O request
137 * @cdev: ccw device
138 *
139 * Cancel the I/O request specified by cdev->req. Return non-zero if request
140 * has already finished, zero otherwise.
141 */
142int ccw_request_cancel(struct ccw_device *cdev)
143{
144 struct subchannel *sch = to_subchannel(cdev->dev.parent);
145 struct ccw_request *req = &cdev->private->req;
146 int rc;
147
148 if (req->done)
149 return 1;
150 req->cancel = 1;
151 rc = cio_clear(sch);
152 if (rc)
153 ccwreq_stop(cdev, rc);
154 return 0;
155}
156
157/*
158 * Return the status of the internal I/O started on the specified ccw device.
159 * Perform BASIC SENSE if required.
160 */
161static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
162{
163 struct irb *irb = &cdev->private->irb;
164 struct cmd_scsw *scsw = &irb->scsw.cmd;
165
166 /* Perform BASIC SENSE if needed. */
167 if (ccw_device_accumulate_and_sense(cdev, lcirb))
168 return IO_RUNNING;
169 /* Check for halt/clear interrupt. */
170 if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
171 return IO_KILLED;
172 /* Check for path error. */
173 if (scsw->cc == 3 || scsw->pno)
174 return IO_PATH_ERROR;
175 /* Handle BASIC SENSE data. */
176 if (irb->esw.esw0.erw.cons) {
177 CIO_TRACE_EVENT(2, "sensedata");
178 CIO_HEX_EVENT(2, &cdev->private->dev_id,
179 sizeof(struct ccw_dev_id));
180 CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
181 /* Check for command reject. */
182 if (irb->ecw[0] & SNS0_CMD_REJECT)
183 return IO_REJECTED;
184 /* Assume that unexpected SENSE data implies an error. */
185 return IO_STATUS_ERROR;
186 }
187 /* Check for channel errors. */
188 if (scsw->cstat != 0)
189 return IO_STATUS_ERROR;
190 /* Check for device errors. */
191 if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
192 return IO_STATUS_ERROR;
193 /* Check for final state. */
194 if (!(scsw->dstat & DEV_STAT_DEV_END))
195 return IO_RUNNING;
196 /* Check for other improper status. */
197 if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
198 return IO_STATUS_ERROR;
199 return IO_DONE;
200}
201
202/*
203 * Log ccw request status.
204 */
205static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
206{
207 struct ccw_request *req = &cdev->private->req;
208 struct {
209 struct ccw_dev_id dev_id;
210 u16 retries;
211 u8 lpm;
212 u8 status;
213 } __attribute__ ((packed)) data;
214 data.dev_id = cdev->private->dev_id;
215 data.retries = req->retries;
216 data.lpm = (u8) req->mask;
217 data.status = (u8) status;
218 CIO_TRACE_EVENT(2, "reqstat");
219 CIO_HEX_EVENT(2, &data, sizeof(data));
220}
221
222/**
223 * ccw_request_handler - interrupt handler for I/O request procedure.
224 * @cdev: ccw device
225 *
226 * Handle interrupt during I/O request procedure.
227 */
228void ccw_request_handler(struct ccw_device *cdev)
229{
230 struct ccw_request *req = &cdev->private->req;
231 struct irb *irb = (struct irb *) __LC_IRB;
232 enum io_status status;
233 int rc = -EOPNOTSUPP;
234
235 /* Check status of I/O request. */
236 status = ccwreq_status(cdev, irb);
237 if (req->filter)
238 status = req->filter(cdev, req->data, irb, status);
239 if (status != IO_RUNNING)
240 ccw_device_set_timeout(cdev, 0);
241 if (status != IO_DONE && status != IO_RUNNING)
242 ccwreq_log_status(cdev, status);
243 switch (status) {
244 case IO_DONE:
245 break;
246 case IO_RUNNING:
247 return;
248 case IO_REJECTED:
249 goto err;
250 case IO_PATH_ERROR:
251 goto out_next_path;
252 case IO_STATUS_ERROR:
253 goto out_restart;
254 case IO_KILLED:
255 /* Check if request was cancelled on purpose. */
256 if (req->cancel) {
257 rc = -EIO;
258 goto err;
259 }
260 goto out_restart;
261 }
262 /* Check back with request initiator. */
263 if (!req->check)
264 goto out;
265 switch (req->check(cdev, req->data)) {
266 case 0:
267 break;
268 case -EAGAIN:
269 goto out_restart;
270 case -EACCES:
271 goto out_next_path;
272 default:
273 goto err;
274 }
275out:
276 ccwreq_stop(cdev, 0);
277 return;
278
279out_next_path:
280 /* Try next path and restart I/O. */
281 if (!ccwreq_next_path(cdev)) {
282 rc = -EACCES;
283 goto err;
284 }
285out_restart:
286 /* Restart. */
287 ccwreq_do(cdev);
288 return;
289err:
290 ccwreq_stop(cdev, rc);
291}
292
293
294/**
295 * ccw_request_timeout - timeout handler for I/O request procedure
296 * @cdev: ccw device
297 *
298 * Handle timeout during I/O request procedure.
299 */
300void ccw_request_timeout(struct ccw_device *cdev)
301{
302 struct subchannel *sch = to_subchannel(cdev->dev.parent);
303 struct ccw_request *req = &cdev->private->req;
304 int rc;
305
306 if (!ccwreq_next_path(cdev)) {
307 /* set the final return code for this request */
308 req->drc = -ETIME;
309 }
310 rc = cio_clear(sch);
311 if (rc)
312 goto err;
313 return;
314
315err:
316 ccwreq_stop(cdev, rc);
317}
318
319/**
320 * ccw_request_notoper - notoper handler for I/O request procedure
321 * @cdev: ccw device
322 *
 323 * Handle notoper during I/O request procedure.
324 */
325void ccw_request_notoper(struct ccw_device *cdev)
326{
327 ccwreq_stop(cdev, -ENODEV);
328}
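
The new request machinery is driven entirely through the ccw_request embedded in ccw_device_private (declared in io_sch.h, see the diffstat): a caller fills in the channel program and retry policy, starts the request, and learns the outcome through its callback, while ccwreq.c handles retries, path rotation, timeouts and BASIC SENSE. A rough sketch of a caller, using the field names visible in this file; it is illustrative, not an in-tree user:

    static void my_req_done(struct ccw_device *cdev, void *data, int rc)
    {
            /* rc is 0 on success, or the final/derived return code (drc). */
    }

    static void my_start_request(struct ccw_device *cdev, struct ccw1 *cp)
    {
            struct ccw_request *req = &cdev->private->req;

            memset(req, 0, sizeof(*req));
            req->cp         = cp;           /* channel program to start */
            req->timeout    = 10 * HZ;      /* per-start I/O timeout */
            req->maxretries = 5;            /* retries before next path */
            req->lpm        = 0xff;         /* logical path mask: all paths */
            req->callback   = my_req_done;
            ccw_request_start(cdev);
    }

Path rotation is mask arithmetic: ccw_request_start() seeds req->mask with 0x8080 so that each path is tried twice, and lpm_adjust() shifts the candidate bit right until it intersects the available paths; for example, lpm_adjust(0x80, 0x28) yields 0x20.
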
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 40002830d48a..c268a2e5b7c3 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -65,7 +65,7 @@ static void set_chp_logically_online(struct chp_id chpid, int onoff)
65 chpid_to_chp(chpid)->state = onoff; 65 chpid_to_chp(chpid)->state = onoff;
66} 66}
67 67
68/* On succes return 0 if channel-path is varied offline, 1 if it is varied 68/* On success return 0 if channel-path is varied offline, 1 if it is varied
69 * online. Return -ENODEV if channel-path is not registered. */ 69 * online. Return -ENODEV if channel-path is not registered. */
70int chp_get_status(struct chp_id chpid) 70int chp_get_status(struct chp_id chpid)
71{ 71{
@@ -393,7 +393,6 @@ int chp_new(struct chp_id chpid)
393 chp->state = 1; 393 chp->state = 1;
394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device; 394 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
395 chp->dev.release = chp_release; 395 chp->dev.release = chp_release;
396 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
397 396
398 /* Obtain channel path description and fill it in. */ 397 /* Obtain channel path description and fill it in. */
399 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); 398 ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -411,6 +410,7 @@ int chp_new(struct chp_id chpid)
411 } else { 410 } else {
412 chp->cmg = -1; 411 chp->cmg = -1;
413 } 412 }
413 dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
414 414
415 /* make it known to the system */ 415 /* make it known to the system */
416 ret = device_register(&chp->dev); 416 ret = device_register(&chp->dev);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 138124fcfcad..126f240715a4 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -618,6 +618,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
618 old_regs = set_irq_regs(regs); 618 old_regs = set_irq_regs(regs);
619 s390_idle_check(); 619 s390_idle_check();
620 irq_enter(); 620 irq_enter();
621 __get_cpu_var(s390_idle).nohz_delay = 1;
621 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 622 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
622 /* Serve timer interrupts first. */ 623 /* Serve timer interrupts first. */
623 clock_comparator_work(); 624 clock_comparator_work();
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704b..bf7f80f5a330 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71enum sch_todo {
72 SCH_TODO_NOTHING,
73 SCH_TODO_UNREG,
74};
75
71/* subchannel data structure used by I/O subroutines */ 76/* subchannel data structure used by I/O subroutines */
72struct subchannel { 77struct subchannel {
73 struct subchannel_id schid; 78 struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
95 struct device dev; /* entry in device tree */ 100 struct device dev; /* entry in device tree */
96 struct css_driver *driver; 101 struct css_driver *driver;
97 void *private; /* private per subchannel type data */ 102 void *private; /* private per subchannel type data */
98 struct work_struct work; 103 enum sch_todo todo;
104 struct work_struct todo_work;
99 struct schib_config config; 105 struct schib_config config;
100} __attribute__ ((aligned(8))); 106} __attribute__ ((aligned(8)));
101 107
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 30f516111307..2985eb439485 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -462,7 +462,7 @@ static struct cmb_area cmb_area = {
462 * block of memory, which can not be moved as long as any channel 462 * block of memory, which can not be moved as long as any channel
463 * is active. Therefore, a maximum number of subchannels needs to 463 * is active. Therefore, a maximum number of subchannels needs to
464 * be defined somewhere. This is a module parameter, defaulting to 464 * be defined somewhere. This is a module parameter, defaulting to
465 * a resonable value of 1024, or 32 kb of memory. 465 * a reasonable value of 1024, or 32 kb of memory.
466 * Current kernels don't allow kmalloc with more than 128kb, so the 466 * Current kernels don't allow kmalloc with more than 128kb, so the
467 * maximum is 4096. 467 * maximum is 4096.
468 */ 468 */
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa83..7679aee6fa14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
133 return rc; 133 return rc;
134} 134}
135 135
136static void css_sch_todo(struct work_struct *work);
137
136static struct subchannel * 138static struct subchannel *
137css_alloc_subchannel(struct subchannel_id schid) 139css_alloc_subchannel(struct subchannel_id schid)
138{ 140{
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
147 kfree(sch); 149 kfree(sch);
148 return ERR_PTR(ret); 150 return ERR_PTR(ret);
149 } 151 }
152 INIT_WORK(&sch->todo_work, css_sch_todo);
150 return sch; 153 return sch;
151} 154}
152 155
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
190} 193}
191EXPORT_SYMBOL_GPL(css_sch_device_unregister); 194EXPORT_SYMBOL_GPL(css_sch_device_unregister);
192 195
196static void css_sch_todo(struct work_struct *work)
197{
198 struct subchannel *sch;
199 enum sch_todo todo;
200
201 sch = container_of(work, struct subchannel, todo_work);
202 /* Find out todo. */
203 spin_lock_irq(sch->lock);
204 todo = sch->todo;
205 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
206 sch->schid.sch_no, todo);
207 sch->todo = SCH_TODO_NOTHING;
208 spin_unlock_irq(sch->lock);
209 /* Perform todo. */
210 if (todo == SCH_TODO_UNREG)
211 css_sch_device_unregister(sch);
212 /* Release workqueue ref. */
213 put_device(&sch->dev);
214}
215
216/**
217 * css_sched_sch_todo - schedule a subchannel operation
218 * @sch: subchannel
219 * @todo: todo
220 *
221 * Schedule the operation identified by @todo to be performed on the slow path
222 * workqueue. Do nothing if another operation with higher priority is already
223 * scheduled. Needs to be called with subchannel lock held.
224 */
225void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
226{
227 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
228 sch->schid.ssid, sch->schid.sch_no, todo);
229 if (sch->todo >= todo)
230 return;
231 /* Get workqueue ref. */
232 if (!get_device(&sch->dev))
233 return;
234 sch->todo = todo;
235 if (!queue_work(slow_path_wq, &sch->todo_work)) {
236 /* Already queued, release workqueue ref. */
237 put_device(&sch->dev);
238 }
239}
240
193static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 241static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
194{ 242{
195 int i; 243 int i;
@@ -376,8 +424,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
376 /* Unusable - ignore. */ 424 /* Unusable - ignore. */
377 return 0; 425 return 0;
378 } 426 }
379 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, " 427 CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
380 "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER); 428 schid.sch_no);
381 429
382 return css_probe_device(schid); 430 return css_probe_device(schid);
383} 431}
@@ -394,6 +442,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
394 "Got subchannel machine check but " 442 "Got subchannel machine check but "
395 "no sch_event handler provided.\n"); 443 "no sch_event handler provided.\n");
396 } 444 }
445 if (ret != 0 && ret != -EAGAIN) {
446 CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
447 sch->schid.ssid, sch->schid.sch_no, ret);
448 }
397 return ret; 449 return ret;
398} 450}
399 451
@@ -684,6 +736,7 @@ static int __init setup_css(int nr)
684 css->pseudo_subchannel->dev.parent = &css->device; 736 css->pseudo_subchannel->dev.parent = &css->device;
685 css->pseudo_subchannel->dev.release = css_subchannel_release; 737 css->pseudo_subchannel->dev.release = css_subchannel_release;
686 dev_set_name(&css->pseudo_subchannel->dev, "defunct"); 738 dev_set_name(&css->pseudo_subchannel->dev, "defunct");
739 mutex_init(&css->pseudo_subchannel->reg_mutex);
687 ret = cio_create_sch_lock(css->pseudo_subchannel); 740 ret = cio_create_sch_lock(css->pseudo_subchannel);
688 if (ret) { 741 if (ret) {
689 kfree(css->pseudo_subchannel); 742 kfree(css->pseudo_subchannel);
@@ -1095,7 +1148,7 @@ static int css_pm_restore(struct device *dev)
1095 return drv->restore ? drv->restore(sch) : 0; 1148 return drv->restore ? drv->restore(sch) : 0;
1096} 1149}
1097 1150
1098static struct dev_pm_ops css_pm_ops = { 1151static const struct dev_pm_ops css_pm_ops = {
1099 .prepare = css_pm_prepare, 1152 .prepare = css_pm_prepare,
1100 .complete = css_pm_complete, 1153 .complete = css_pm_complete,
1101 .freeze = css_pm_freeze, 1154 .freeze = css_pm_freeze,
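
css_sched_sch_todo() is documented above as requiring the subchannel lock, and together with css_sch_todo() it encodes the usual lifetime rule for deferred work on a refcounted object: take a device reference before queueing, drop it at the end of the work function, and drop it immediately if queue_work() reports the item was already pending. A minimal sketch of a call site (the surrounding event-handler context is assumed):

    spin_lock_irq(sch->lock);
    /* Defer unregistration; a no-op if already scheduled or outranked. */
    css_sched_sch_todo(sch, SCH_TODO_UNREG);
    spin_unlock_irq(sch->lock);
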
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151c..fe84b92cde60 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12#include <asm/schid.h> 12#include <asm/schid.h>
13 13
14#include "cio.h"
15
14/* 16/*
15 * path grouping stuff 17 * path grouping stuff
16 */ 18 */
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
151 153
152extern struct workqueue_struct *slow_path_wq; 154extern struct workqueue_struct *slow_path_wq;
153void css_wait_for_slow_path(void); 155void css_wait_for_slow_path(void);
156void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
154#endif 157#endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f780bdd3a04e..73901c9e260f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 9 */
10
11#define KMSG_COMPONENT "cio"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
10#include <linux/module.h> 14#include <linux/module.h>
11#include <linux/init.h> 15#include <linux/init.h>
12#include <linux/spinlock.h> 16#include <linux/spinlock.h>
@@ -299,53 +303,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
299 303
300static void ccw_device_unregister(struct ccw_device *cdev) 304static void ccw_device_unregister(struct ccw_device *cdev)
301{ 305{
302 if (test_and_clear_bit(1, &cdev->private->registered)) { 306 if (device_is_registered(&cdev->dev)) {
307 /* Undo device_add(). */
303 device_del(&cdev->dev); 308 device_del(&cdev->dev);
309 }
310 if (cdev->private->flags.initialized) {
311 cdev->private->flags.initialized = 0;
304 /* Release reference from device_initialize(). */ 312 /* Release reference from device_initialize(). */
305 put_device(&cdev->dev); 313 put_device(&cdev->dev);
306 } 314 }
307} 315}
308 316
309static void ccw_device_remove_orphan_cb(struct work_struct *work) 317static void io_subchannel_quiesce(struct subchannel *);
310{
311 struct ccw_device_private *priv;
312 struct ccw_device *cdev;
313
314 priv = container_of(work, struct ccw_device_private, kick_work);
315 cdev = priv->cdev;
316 ccw_device_unregister(cdev);
317 /* Release cdev reference for workqueue processing. */
318 put_device(&cdev->dev);
319}
320
321static void
322ccw_device_remove_disconnected(struct ccw_device *cdev)
323{
324 unsigned long flags;
325
326 /*
327 * Forced offline in disconnected state means
328 * 'throw away device'.
329 */
330 if (ccw_device_is_orphan(cdev)) {
331 /*
332 * Deregister ccw device.
333 * Unfortunately, we cannot do this directly from the
334 * attribute method.
335 */
336 /* Get cdev reference for workqueue processing. */
337 if (!get_device(&cdev->dev))
338 return;
339 spin_lock_irqsave(cdev->ccwlock, flags);
340 cdev->private->state = DEV_STATE_NOT_OPER;
341 spin_unlock_irqrestore(cdev->ccwlock, flags);
342 PREPARE_WORK(&cdev->private->kick_work,
343 ccw_device_remove_orphan_cb);
344 queue_work(slow_path_wq, &cdev->private->kick_work);
345 } else
346 /* Deregister subchannel, which will kill the ccw device. */
347 ccw_device_schedule_sch_unregister(cdev);
348}
349 318
350/** 319/**
351 * ccw_device_set_offline() - disable a ccw device for I/O 320 * ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +329,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
360 */ 329 */
361int ccw_device_set_offline(struct ccw_device *cdev) 330int ccw_device_set_offline(struct ccw_device *cdev)
362{ 331{
363 int ret; 332 struct subchannel *sch;
333 int ret, state;
364 334
365 if (!cdev) 335 if (!cdev)
366 return -ENODEV; 336 return -ENODEV;
@@ -374,6 +344,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
374 } 344 }
375 cdev->online = 0; 345 cdev->online = 0;
376 spin_lock_irq(cdev->ccwlock); 346 spin_lock_irq(cdev->ccwlock);
347 sch = to_subchannel(cdev->dev.parent);
377 /* Wait until a final state or DISCONNECTED is reached */ 348 /* Wait until a final state or DISCONNECTED is reached */
378 while (!dev_fsm_final_state(cdev) && 349 while (!dev_fsm_final_state(cdev) &&
379 cdev->private->state != DEV_STATE_DISCONNECTED) { 350 cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +353,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
382 cdev->private->state == DEV_STATE_DISCONNECTED)); 353 cdev->private->state == DEV_STATE_DISCONNECTED));
383 spin_lock_irq(cdev->ccwlock); 354 spin_lock_irq(cdev->ccwlock);
384 } 355 }
385 ret = ccw_device_offline(cdev); 356 do {
386 if (ret) 357 ret = ccw_device_offline(cdev);
387 goto error; 358 if (!ret)
359 break;
360 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
361 "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
362 cdev->private->dev_id.devno);
363 if (ret != -EBUSY)
364 goto error;
365 state = cdev->private->state;
366 spin_unlock_irq(cdev->ccwlock);
367 io_subchannel_quiesce(sch);
368 spin_lock_irq(cdev->ccwlock);
369 cdev->private->state = state;
370 } while (ret == -EBUSY);
388 spin_unlock_irq(cdev->ccwlock); 371 spin_unlock_irq(cdev->ccwlock);
389 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) || 372 wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
390 cdev->private->state == DEV_STATE_DISCONNECTED)); 373 cdev->private->state == DEV_STATE_DISCONNECTED));
374 /* Inform the user if set offline failed. */
375 if (cdev->private->state == DEV_STATE_BOXED) {
376 pr_warning("%s: The device entered boxed state while "
377 "being set offline\n", dev_name(&cdev->dev));
378 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
379 pr_warning("%s: The device stopped operating while "
380 "being set offline\n", dev_name(&cdev->dev));
381 }
391 /* Give up reference from ccw_device_set_online(). */ 382 /* Give up reference from ccw_device_set_online(). */
392 put_device(&cdev->dev); 383 put_device(&cdev->dev);
393 return 0; 384 return 0;
394 385
395error: 386error:
396 CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
397 ret, cdev->private->dev_id.ssid,
398 cdev->private->dev_id.devno);
399 cdev->private->state = DEV_STATE_OFFLINE; 387 cdev->private->state = DEV_STATE_OFFLINE;
400 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 388 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
401 spin_unlock_irq(cdev->ccwlock); 389 spin_unlock_irq(cdev->ccwlock);
@@ -448,6 +436,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
448 if ((cdev->private->state != DEV_STATE_ONLINE) && 436 if ((cdev->private->state != DEV_STATE_ONLINE) &&
449 (cdev->private->state != DEV_STATE_W4SENSE)) { 437 (cdev->private->state != DEV_STATE_W4SENSE)) {
450 spin_unlock_irq(cdev->ccwlock); 438 spin_unlock_irq(cdev->ccwlock);
439 /* Inform the user that set online failed. */
440 if (cdev->private->state == DEV_STATE_BOXED) {
441 pr_warning("%s: Setting the device online failed "
442 "because it is boxed\n",
443 dev_name(&cdev->dev));
444 } else if (cdev->private->state == DEV_STATE_NOT_OPER) {
445 pr_warning("%s: Setting the device online failed "
446 "because it is not operational\n",
447 dev_name(&cdev->dev));
448 }
451 /* Give up online reference since onlining failed. */ 449 /* Give up online reference since onlining failed. */
452 put_device(&cdev->dev); 450 put_device(&cdev->dev);
453 return -ENODEV; 451 return -ENODEV;
@@ -494,27 +492,22 @@ error:
494 492
495static int online_store_handle_offline(struct ccw_device *cdev) 493static int online_store_handle_offline(struct ccw_device *cdev)
496{ 494{
497 if (cdev->private->state == DEV_STATE_DISCONNECTED) 495 if (cdev->private->state == DEV_STATE_DISCONNECTED) {
498 ccw_device_remove_disconnected(cdev); 496 spin_lock_irq(cdev->ccwlock);
499 else if (cdev->online && cdev->drv && cdev->drv->set_offline) 497 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
498 spin_unlock_irq(cdev->ccwlock);
499 } else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500 return ccw_device_set_offline(cdev); 500 return ccw_device_set_offline(cdev);
501 return 0; 501 return 0;
502} 502}
503 503
504static int online_store_recog_and_online(struct ccw_device *cdev) 504static int online_store_recog_and_online(struct ccw_device *cdev)
505{ 505{
506 int ret;
507
508 /* Do device recognition, if needed. */ 506 /* Do device recognition, if needed. */
509 if (cdev->private->state == DEV_STATE_BOXED) { 507 if (cdev->private->state == DEV_STATE_BOXED) {
510 ret = ccw_device_recognition(cdev); 508 spin_lock_irq(cdev->ccwlock);
511 if (ret) { 509 ccw_device_recognition(cdev);
512 CIO_MSG_EVENT(0, "Couldn't start recognition " 510 spin_unlock_irq(cdev->ccwlock);
513 "for device 0.%x.%04x (ret=%d)\n",
514 cdev->private->dev_id.ssid,
515 cdev->private->dev_id.devno, ret);
516 return ret;
517 }
518 wait_event(cdev->private->wait_q, 511 wait_event(cdev->private->wait_q,
519 cdev->private->flags.recog_done); 512 cdev->private->flags.recog_done);
520 if (cdev->private->state != DEV_STATE_OFFLINE) 513 if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +546,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
553 int force, ret; 546 int force, ret;
554 unsigned long i; 547 unsigned long i;
555 548
556 if ((cdev->private->state != DEV_STATE_OFFLINE && 549 if (!dev_fsm_final_state(cdev) &&
557 cdev->private->state != DEV_STATE_ONLINE && 550 cdev->private->state != DEV_STATE_DISCONNECTED)
558 cdev->private->state != DEV_STATE_BOXED && 551 return -EAGAIN;
559 cdev->private->state != DEV_STATE_DISCONNECTED) || 552 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
560 atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
561 return -EAGAIN; 553 return -EAGAIN;
562 554
563 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 555 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -665,81 +657,31 @@ static int ccw_device_register(struct ccw_device *cdev)
665 cdev->private->dev_id.devno); 657 cdev->private->dev_id.devno);
666 if (ret) 658 if (ret)
667 return ret; 659 return ret;
668 ret = device_add(dev); 660 return device_add(dev);
669 if (ret)
670 return ret;
671
672 set_bit(1, &cdev->private->registered);
673 return ret;
674} 661}
675 662
676struct match_data { 663static int match_dev_id(struct device *dev, void *data)
677 struct ccw_dev_id dev_id;
678 struct ccw_device * sibling;
679};
680
681static int
682match_devno(struct device * dev, void * data)
683{
684 struct match_data * d = data;
685 struct ccw_device * cdev;
686
687 cdev = to_ccwdev(dev);
688 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
689 !ccw_device_is_orphan(cdev) &&
690 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
691 (cdev != d->sibling))
692 return 1;
693 return 0;
694}
695
696static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
697 struct ccw_device *sibling)
698{
699 struct device *dev;
700 struct match_data data;
701
702 data.dev_id = *dev_id;
703 data.sibling = sibling;
704 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
705
706 return dev ? to_ccwdev(dev) : NULL;
707}
708
709static int match_orphan(struct device *dev, void *data)
710{ 664{
711 struct ccw_dev_id *dev_id; 665 struct ccw_device *cdev = to_ccwdev(dev);
712 struct ccw_device *cdev; 666 struct ccw_dev_id *dev_id = data;
713 667
714 dev_id = data;
715 cdev = to_ccwdev(dev);
716 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 668 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
717} 669}
718 670
719static struct ccw_device * 671static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
720get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
721 struct ccw_dev_id *dev_id)
722{ 672{
723 struct device *dev; 673 struct device *dev;
724 674
725 dev = device_find_child(&css->pseudo_subchannel->dev, dev_id, 675 dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
726 match_orphan);
727 676
728 return dev ? to_ccwdev(dev) : NULL; 677 return dev ? to_ccwdev(dev) : NULL;
729} 678}
730 679
731void ccw_device_do_unbind_bind(struct work_struct *work) 680static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
732{ 681{
733 struct ccw_device_private *priv;
734 struct ccw_device *cdev;
735 struct subchannel *sch;
736 int ret; 682 int ret;
737 683
738 priv = container_of(work, struct ccw_device_private, kick_work); 684 if (device_is_registered(&cdev->dev)) {
739 cdev = priv->cdev;
740 sch = to_subchannel(cdev->dev.parent);
741
742 if (test_bit(1, &cdev->private->registered)) {
743 device_release_driver(&cdev->dev); 685 device_release_driver(&cdev->dev);
744 ret = device_attach(&cdev->dev); 686 ret = device_attach(&cdev->dev);
745 WARN_ON(ret == -ENODEV); 687 WARN_ON(ret == -ENODEV);
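
Like any bus_find_device() wrapper, the simplified get_ccwdev_by_dev_id() above returns its match with a device reference held, so every non-NULL result must be balanced with put_device(). A minimal sketch of the expected calling pattern:

    struct ccw_device *cdev;

    cdev = get_ccwdev_by_dev_id(&dev_id);
    if (cdev) {
            /* ... inspect or act on the matching device ... */
            put_device(&cdev->dev); /* drop the bus_find_device() reference */
    }
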
@@ -773,6 +715,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
773 return ERR_PTR(-ENOMEM); 715 return ERR_PTR(-ENOMEM);
774} 716}
775 717
718static void ccw_device_todo(struct work_struct *work);
719
776static int io_subchannel_initialize_dev(struct subchannel *sch, 720static int io_subchannel_initialize_dev(struct subchannel *sch,
777 struct ccw_device *cdev) 721 struct ccw_device *cdev)
778{ 722{
@@ -780,7 +724,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
780 atomic_set(&cdev->private->onoff, 0); 724 atomic_set(&cdev->private->onoff, 0);
781 cdev->dev.parent = &sch->dev; 725 cdev->dev.parent = &sch->dev;
782 cdev->dev.release = ccw_device_release; 726 cdev->dev.release = ccw_device_release;
783 INIT_WORK(&cdev->private->kick_work, NULL); 727 INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
784 cdev->dev.groups = ccwdev_attr_groups; 728 cdev->dev.groups = ccwdev_attr_groups;
785 /* Do first half of device_register. */ 729 /* Do first half of device_register. */
786 device_initialize(&cdev->dev); 730 device_initialize(&cdev->dev);
@@ -789,6 +733,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
789 put_device(&cdev->dev); 733 put_device(&cdev->dev);
790 return -ENODEV; 734 return -ENODEV;
791 } 735 }
736 cdev->private->flags.initialized = 1;
792 return 0; 737 return 0;
793} 738}
794 739
@@ -806,76 +751,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
806 return cdev; 751 return cdev;
807} 752}
808 753
809static int io_subchannel_recog(struct ccw_device *, struct subchannel *); 754static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
810
811static void sch_attach_device(struct subchannel *sch,
812 struct ccw_device *cdev)
813{
814 css_update_ssd_info(sch);
815 spin_lock_irq(sch->lock);
816 sch_set_cdev(sch, cdev);
817 cdev->private->schid = sch->schid;
818 cdev->ccwlock = sch->lock;
819 ccw_device_trigger_reprobe(cdev);
820 spin_unlock_irq(sch->lock);
821}
822
823static void sch_attach_disconnected_device(struct subchannel *sch,
824 struct ccw_device *cdev)
825{
826 struct subchannel *other_sch;
827 int ret;
828
829 /* Get reference for new parent. */
830 if (!get_device(&sch->dev))
831 return;
832 other_sch = to_subchannel(cdev->dev.parent);
833 /* Note: device_move() changes cdev->dev.parent */
834 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
835 if (ret) {
836 CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
837 "(ret=%d)!\n", cdev->private->dev_id.ssid,
838 cdev->private->dev_id.devno, ret);
839 /* Put reference for new parent. */
840 put_device(&sch->dev);
841 return;
842 }
843 sch_set_cdev(other_sch, NULL);
844 /* No need to keep a subchannel without ccw device around. */
845 css_sch_device_unregister(other_sch);
846 sch_attach_device(sch, cdev);
847 /* Put reference for old parent. */
848 put_device(&other_sch->dev);
849}
850
851static void sch_attach_orphaned_device(struct subchannel *sch,
852 struct ccw_device *cdev)
853{
854 int ret;
855 struct subchannel *pseudo_sch;
856
857 /* Get reference for new parent. */
858 if (!get_device(&sch->dev))
859 return;
860 pseudo_sch = to_subchannel(cdev->dev.parent);
861 /*
862 * Try to move the ccw device to its new subchannel.
863 * Note: device_move() changes cdev->dev.parent
864 */
865 ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
866 if (ret) {
867 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
868 "failed (ret=%d)!\n",
869 cdev->private->dev_id.ssid,
870 cdev->private->dev_id.devno, ret);
871 /* Put reference for new parent. */
872 put_device(&sch->dev);
873 return;
874 }
875 sch_attach_device(sch, cdev);
876 /* Put reference on pseudo subchannel. */
877 put_device(&pseudo_sch->dev);
878}
879 755
880static void sch_create_and_recog_new_device(struct subchannel *sch) 756static void sch_create_and_recog_new_device(struct subchannel *sch)
881{ 757{
@@ -888,100 +764,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
888 css_sch_device_unregister(sch); 764 css_sch_device_unregister(sch);
889 return; 765 return;
890 } 766 }
891 spin_lock_irq(sch->lock);
892 sch_set_cdev(sch, cdev);
893 spin_unlock_irq(sch->lock);
894 /* Start recognition for the new ccw device. */ 767 /* Start recognition for the new ccw device. */
895 if (io_subchannel_recog(cdev, sch)) { 768 io_subchannel_recog(cdev, sch);
896 spin_lock_irq(sch->lock);
897 sch_set_cdev(sch, NULL);
898 spin_unlock_irq(sch->lock);
899 css_sch_device_unregister(sch);
900 /* Put reference from io_subchannel_create_ccwdev(). */
901 put_device(&sch->dev);
902 /* Give up initial reference. */
903 put_device(&cdev->dev);
904 }
905}
906
907
908void ccw_device_move_to_orphanage(struct work_struct *work)
909{
910 struct ccw_device_private *priv;
911 struct ccw_device *cdev;
912 struct ccw_device *replacing_cdev;
913 struct subchannel *sch;
914 int ret;
915 struct channel_subsystem *css;
916 struct ccw_dev_id dev_id;
917
918 priv = container_of(work, struct ccw_device_private, kick_work);
919 cdev = priv->cdev;
920 sch = to_subchannel(cdev->dev.parent);
921 css = to_css(sch->dev.parent);
922 dev_id.devno = sch->schib.pmcw.dev;
923 dev_id.ssid = sch->schid.ssid;
924
925 /* Increase refcount for pseudo subchannel. */
926 get_device(&css->pseudo_subchannel->dev);
927 /*
928 * Move the orphaned ccw device to the orphanage so the replacing
929 * ccw device can take its place on the subchannel.
930 * Note: device_move() changes cdev->dev.parent
931 */
932 ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
933 DPM_ORDER_NONE);
934 if (ret) {
935 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
936 "(ret=%d)!\n", cdev->private->dev_id.ssid,
937 cdev->private->dev_id.devno, ret);
938 /* Decrease refcount for pseudo subchannel again. */
939 put_device(&css->pseudo_subchannel->dev);
940 return;
941 }
942 cdev->ccwlock = css->pseudo_subchannel->lock;
943 /*
944 * Search for the replacing ccw device
945 * - among the disconnected devices
946 * - in the orphanage
947 */
948 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
949 if (replacing_cdev) {
950 sch_attach_disconnected_device(sch, replacing_cdev);
951 /* Release reference from get_disc_ccwdev_by_dev_id() */
952 put_device(&replacing_cdev->dev);
953 /* Release reference of subchannel from old cdev. */
954 put_device(&sch->dev);
955 return;
956 }
957 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
958 if (replacing_cdev) {
959 sch_attach_orphaned_device(sch, replacing_cdev);
960 /* Release reference from get_orphaned_ccwdev_by_dev_id() */
961 put_device(&replacing_cdev->dev);
962 /* Release reference of subchannel from old cdev. */
963 put_device(&sch->dev);
964 return;
965 }
966 sch_create_and_recog_new_device(sch);
967 /* Release reference of subchannel from old cdev. */
968 put_device(&sch->dev);
969} 769}
970 770
971/* 771/*
972 * Register recognized device. 772 * Register recognized device.
973 */ 773 */
974static void 774static void io_subchannel_register(struct ccw_device *cdev)
975io_subchannel_register(struct work_struct *work)
976{ 775{
977 struct ccw_device_private *priv;
978 struct ccw_device *cdev;
979 struct subchannel *sch; 776 struct subchannel *sch;
980 int ret; 777 int ret;
981 unsigned long flags; 778 unsigned long flags;
982 779
983 priv = container_of(work, struct ccw_device_private, kick_work);
984 cdev = priv->cdev;
985 sch = to_subchannel(cdev->dev.parent); 780 sch = to_subchannel(cdev->dev.parent);
986 /* 781 /*
987 * Check if subchannel is still registered. It may have become 782 * Check if subchannel is still registered. It may have become
@@ -1033,41 +828,23 @@ out:
1033 cdev->private->flags.recog_done = 1; 828 cdev->private->flags.recog_done = 1;
1034 wake_up(&cdev->private->wait_q); 829 wake_up(&cdev->private->wait_q);
1035out_err: 830out_err:
1036 /* Release reference for workqueue processing. */
1037 put_device(&cdev->dev);
1038 if (atomic_dec_and_test(&ccw_device_init_count)) 831 if (atomic_dec_and_test(&ccw_device_init_count))
1039 wake_up(&ccw_device_init_wq); 832 wake_up(&ccw_device_init_wq);
1040} 833}
1041 834
1042static void ccw_device_call_sch_unregister(struct work_struct *work) 835static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
1043{ 836{
1044 struct ccw_device_private *priv;
1045 struct ccw_device *cdev;
1046 struct subchannel *sch; 837 struct subchannel *sch;
1047 838
1048 priv = container_of(work, struct ccw_device_private, kick_work);
1049 cdev = priv->cdev;
1050 /* Get subchannel reference for local processing. */ 839 /* Get subchannel reference for local processing. */
1051 if (!get_device(cdev->dev.parent)) 840 if (!get_device(cdev->dev.parent))
1052 return; 841 return;
1053 sch = to_subchannel(cdev->dev.parent); 842 sch = to_subchannel(cdev->dev.parent);
1054 css_sch_device_unregister(sch); 843 css_sch_device_unregister(sch);
1055 /* Release cdev reference for workqueue processing.*/
1056 put_device(&cdev->dev);
1057 /* Release subchannel reference for local processing. */ 844 /* Release subchannel reference for local processing. */
1058 put_device(&sch->dev); 845 put_device(&sch->dev);
1059} 846}
1060 847
1061void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
1062{
1063 /* Get cdev reference for workqueue processing. */
1064 if (!get_device(&cdev->dev))
1065 return;
1066 PREPARE_WORK(&cdev->private->kick_work,
1067 ccw_device_call_sch_unregister);
1068 queue_work(slow_path_wq, &cdev->private->kick_work);
1069}
1070
1071/* 848/*
1072 * subchannel recognition done. Called from the state machine. 849 * subchannel recognition done. Called from the state machine.
1073 */ 850 */
@@ -1083,7 +860,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1083 /* Device did not respond in time. */ 860 /* Device did not respond in time. */
1084 case DEV_STATE_NOT_OPER: 861 case DEV_STATE_NOT_OPER:
1085 cdev->private->flags.recog_done = 1; 862 cdev->private->flags.recog_done = 1;
1086 ccw_device_schedule_sch_unregister(cdev); 863 /* Remove device found not operational. */
864 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1087 if (atomic_dec_and_test(&ccw_device_init_count)) 865 if (atomic_dec_and_test(&ccw_device_init_count))
1088 wake_up(&ccw_device_init_wq); 866 wake_up(&ccw_device_init_wq);
1089 break; 867 break;
@@ -1092,22 +870,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
1092 * We can't register the device in interrupt context so 870 * We can't register the device in interrupt context so
1093 * we schedule a work item. 871 * we schedule a work item.
1094 */ 872 */
1095 if (!get_device(&cdev->dev)) 873 ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
1096 break;
1097 PREPARE_WORK(&cdev->private->kick_work,
1098 io_subchannel_register);
1099 queue_work(slow_path_wq, &cdev->private->kick_work);
1100 break; 874 break;
1101 } 875 }
1102} 876}
1103 877
1104static int 878static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1105io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1106{ 879{
1107 int rc;
1108 struct ccw_device_private *priv; 880 struct ccw_device_private *priv;
1109 881
1110 sch_set_cdev(sch, cdev);
1111 cdev->ccwlock = sch->lock; 882 cdev->ccwlock = sch->lock;
1112 883
1113 /* Init private data. */ 884 /* Init private data. */
@@ -1125,62 +896,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1125 896
1126 /* Start async. device sensing. */ 897 /* Start async. device sensing. */
1127 spin_lock_irq(sch->lock); 898 spin_lock_irq(sch->lock);
1128 rc = ccw_device_recognition(cdev); 899 sch_set_cdev(sch, cdev);
900 ccw_device_recognition(cdev);
1129 spin_unlock_irq(sch->lock); 901 spin_unlock_irq(sch->lock);
1130 if (rc) {
1131 if (atomic_dec_and_test(&ccw_device_init_count))
1132 wake_up(&ccw_device_init_wq);
1133 }
1134 return rc;
1135} 902}
1136 903
-static void ccw_device_move_to_sch(struct work_struct *work)
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+				  struct subchannel *sch)
 {
-	struct ccw_device_private *priv;
-	int rc;
-	struct subchannel *sch;
-	struct ccw_device *cdev;
-	struct subchannel *former_parent;
+	struct subchannel *old_sch;
+	int rc, old_enabled = 0;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	sch = priv->sch;
-	cdev = priv->cdev;
-	former_parent = to_subchannel(cdev->dev.parent);
-	/* Get reference for new parent. */
+	old_sch = to_subchannel(cdev->dev.parent);
+	/* Obtain child reference for new parent. */
 	if (!get_device(&sch->dev))
-		return;
+		return -ENODEV;
+
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		old_enabled = old_sch->schib.pmcw.ena;
+		rc = 0;
+		if (old_enabled)
+			rc = cio_disable_subchannel(old_sch);
+		spin_unlock_irq(old_sch->lock);
+		if (rc == -EBUSY) {
+			/* Release child reference for new parent. */
+			put_device(&sch->dev);
+			return rc;
+		}
+	}
+
 	mutex_lock(&sch->reg_mutex);
-	/*
-	 * Try to move the ccw device to its new subchannel.
-	 * Note: device_move() changes cdev->dev.parent
-	 */
 	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
 	mutex_unlock(&sch->reg_mutex);
 	if (rc) {
-		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
-			      "0.%x.%04x failed (ret=%d)!\n",
+		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
 			      cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no, rc);
-		css_sch_device_unregister(sch);
-		/* Put reference for new parent again. */
+			      sch->schib.pmcw.dev, rc);
+		if (old_enabled) {
+			/* Try to reenable the old subchannel. */
+			spin_lock_irq(old_sch->lock);
+			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+			spin_unlock_irq(old_sch->lock);
+		}
+		/* Release child reference for new parent. */
 		put_device(&sch->dev);
-		goto out;
+		return rc;
 	}
-	if (!sch_is_pseudo_sch(former_parent)) {
-		spin_lock_irq(former_parent->lock);
-		sch_set_cdev(former_parent, NULL);
-		spin_unlock_irq(former_parent->lock);
-		css_sch_device_unregister(former_parent);
-		/* Reset intparm to zeroes. */
-		former_parent->config.intparm = 0;
-		cio_commit_config(former_parent);
+	/* Clean up old subchannel. */
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		sch_set_cdev(old_sch, NULL);
+		spin_unlock_irq(old_sch->lock);
+		css_schedule_eval(old_sch->schid);
 	}
-	sch_attach_device(sch, cdev);
-out:
-	/* Put reference for old parent. */
-	put_device(&former_parent->dev);
-	put_device(&cdev->dev);
+	/* Release child reference for old parent. */
+	put_device(&old_sch->dev);
+	/* Initialize new subchannel. */
+	spin_lock_irq(sch->lock);
+	cdev->private->schid = sch->schid;
+	cdev->ccwlock = sch->lock;
+	if (!sch_is_pseudo_sch(sch))
+		sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
+	if (!sch_is_pseudo_sch(sch))
+		css_update_ssd_info(sch);
+	return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_subsystem *css = to_css(sch->dev.parent);
+
+	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
 }
 
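The reference handling in ccw_device_move_to_sch() follows the usual driver-core reparenting contract: pin the new parent before device_move(), and undo the pin on every failure path. A minimal, self-contained sketch of that contract (the names reparent/child/new_parent are illustrative, not part of the patch):

#include <linux/device.h>

/* Pin the new parent before device_move(); drop the pin on failure. */
static int reparent(struct device *child, struct device *new_parent)
{
	int rc;

	if (!get_device(new_parent))
		return -ENODEV;
	rc = device_move(child, new_parent, DPM_ORDER_PARENT_BEFORE_DEV);
	if (rc)
		put_device(new_parent);	/* failure: undo the pin */
	return rc;
}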
 static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +989,6 @@ void io_subchannel_init_config(struct subchannel *sch)
 {
 	memset(&sch->config, 0, sizeof(sch->config));
 	sch->config.csense = 1;
-	/* Use subchannel mp mode when there is more than 1 installed CHPID. */
-	if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
-		sch->config.mp = 1;
 }
 
 static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1009,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
 	io_subchannel_init_config(sch);
 }
 
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
-	struct subchannel *sch;
-
-	sch = container_of(work, struct subchannel, work);
-	css_sch_device_unregister(sch);
-	put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
-	get_device(&sch->dev);
-	INIT_WORK(&sch->work, io_subchannel_do_unreg);
-	queue_work(slow_path_wq, &sch->work);
-}
-
 /*
  * Note: We always return 0 so that we bind to the device even on error.
  * This is needed so that our remove function is called on unregister.
@@ -1247,11 +1017,8 @@ static int io_subchannel_probe(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	int rc;
-	unsigned long flags;
-	struct ccw_dev_id dev_id;
 
-	cdev = sch_get_cdev(sch);
-	if (cdev) {
+	if (cio_is_console(sch->schid)) {
 		rc = sysfs_create_group(&sch->dev.kobj,
 					&io_subchannel_attr_group);
 		if (rc)
@@ -1260,15 +1027,16 @@ static int io_subchannel_probe(struct subchannel *sch)
1260 "0.%x.%04x (rc=%d)\n", 1027 "0.%x.%04x (rc=%d)\n",
1261 sch->schid.ssid, sch->schid.sch_no, rc); 1028 sch->schid.ssid, sch->schid.sch_no, rc);
1262 /* 1029 /*
1263 * This subchannel already has an associated ccw_device. 1030 * The console subchannel already has an associated ccw_device.
1264 * Throw the delayed uevent for the subchannel, register 1031 * Throw the delayed uevent for the subchannel, register
1265 * the ccw_device and exit. This happens for all early 1032 * the ccw_device and exit.
1266 * devices, e.g. the console.
1267 */ 1033 */
1268 dev_set_uevent_suppress(&sch->dev, 0); 1034 dev_set_uevent_suppress(&sch->dev, 0);
1269 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 1035 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1036 cdev = sch_get_cdev(sch);
1270 cdev->dev.groups = ccwdev_attr_groups; 1037 cdev->dev.groups = ccwdev_attr_groups;
1271 device_initialize(&cdev->dev); 1038 device_initialize(&cdev->dev);
1039 cdev->private->flags.initialized = 1;
1272 ccw_device_register(cdev); 1040 ccw_device_register(cdev);
1273 /* 1041 /*
1274 * Check if the device is already online. If it is 1042 * Check if the device is already online. If it is
@@ -1293,44 +1061,14 @@ static int io_subchannel_probe(struct subchannel *sch)
 	sch->private = kzalloc(sizeof(struct io_subchannel_private),
 			       GFP_KERNEL | GFP_DMA);
 	if (!sch->private)
-		goto out_err;
-	/*
-	 * First check if a fitting device may be found amongst the
-	 * disconnected devices or in the orphanage.
-	 */
-	dev_id.devno = sch->schib.pmcw.dev;
-	dev_id.ssid = sch->schid.ssid;
-	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
-	if (!cdev)
-		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
-						     &dev_id);
-	if (cdev) {
-		/*
-		 * Schedule moving the device until when we have a registered
-		 * subchannel to move to and succeed the probe. We can
-		 * unregister later again, when the probe is through.
-		 */
-		cdev->private->sch = sch;
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_move_to_sch);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-		return 0;
-	}
-	cdev = io_subchannel_create_ccwdev(sch);
-	if (IS_ERR(cdev))
-		goto out_err;
-	rc = io_subchannel_recog(cdev, sch);
-	if (rc) {
-		spin_lock_irqsave(sch->lock, flags);
-		io_subchannel_recog_done(cdev);
-		spin_unlock_irqrestore(sch->lock, flags);
-	}
+		goto out_schedule;
+	css_schedule_eval(sch->schid);
 	return 0;
-out_err:
-	kfree(sch->private);
-	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+
 out_schedule:
-	io_subchannel_schedule_removal(sch);
+	spin_lock_irq(sch->lock);
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	spin_unlock_irq(sch->lock);
 	return 0;
 }
 
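A hedged illustration of the "always return 0" probe convention described in the comment above io_subchannel_probe(): even the allocation-failure path claims the device, so the driver stays bound and its remove() callback runs on unregistration. example_probe() is hypothetical and assumes the cio-internal helpers shown in this patch:

static int example_probe(struct subchannel *sch)
{
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private) {
		/* Defer teardown to the todo machinery, but still bind. */
		spin_lock_irq(sch->lock);
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		spin_unlock_irq(sch->lock);
	}
	return 0;
}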
@@ -1338,32 +1076,23 @@ static int
 io_subchannel_remove (struct subchannel *sch)
 {
 	struct ccw_device *cdev;
-	unsigned long flags;
 
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
-		return 0;
+		goto out_free;
+	io_subchannel_quiesce(sch);
 	/* Set ccw device to not operational and drop reference. */
-	spin_lock_irqsave(cdev->ccwlock, flags);
+	spin_lock_irq(cdev->ccwlock);
 	sch_set_cdev(sch, NULL);
 	cdev->private->state = DEV_STATE_NOT_OPER;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	spin_unlock_irq(cdev->ccwlock);
 	ccw_device_unregister(cdev);
+out_free:
 	kfree(sch->private);
 	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
 	return 0;
 }
 
-static int io_subchannel_notify(struct subchannel *sch, int event)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return ccw_device_notify(cdev, event);
-}
-
 static void io_subchannel_verify(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
@@ -1373,36 +1102,6 @@ static void io_subchannel_verify(struct subchannel *sch)
 	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	if (cio_update_schib(sch))
-		return 0;
-	if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch,
-				  struct ccw_device *cdev)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (cdev->online)
-			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-		else
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
-	/* Request retry of internal operation. */
-	cdev->private->flags.intretry = 1;
-	/* Call handler. */
-	if (cdev->handler)
-		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
-}
-
 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
 {
 	struct ccw_device *cdev;
@@ -1410,18 +1109,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
 		return;
-	if (check_for_io_on_path(sch, mask)) {
-		if (cdev->private->state == DEV_STATE_ONLINE)
-			ccw_device_kill_io(cdev);
-		else {
-			terminate_internal_io(sch, cdev);
-			/* Re-start path verification. */
-			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-		}
-	} else
-		/* trigger path verification. */
-		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	if (cio_update_schib(sch))
+		goto err;
+	/* Check for I/O on path. */
+	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+		goto out;
+	if (cdev->private->state == DEV_STATE_ONLINE) {
+		ccw_device_kill_io(cdev);
+		goto out;
+	}
+	if (cio_clear(sch))
+		goto err;
+out:
+	/* Trigger path verification. */
+	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return;
 
+err:
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 }
 
 static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1458,46 +1163,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
 	return 0;
 }
 
-static void
-io_subchannel_shutdown(struct subchannel *sch)
+static void io_subchannel_quiesce(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	int ret;
 
+	spin_lock_irq(sch->lock);
 	cdev = sch_get_cdev(sch);
-
 	if (cio_is_console(sch->schid))
-		return;
+		goto out_unlock;
 	if (!sch->schib.pmcw.ena)
-		/* Nothing to do. */
-		return;
+		goto out_unlock;
 	ret = cio_disable_subchannel(sch);
 	if (ret != -EBUSY)
-		/* Subchannel is disabled, we're done. */
-		return;
-	cdev->private->state = DEV_STATE_QUIESCE;
+		goto out_unlock;
 	if (cdev->handler)
-		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
-	ret = ccw_device_cancel_halt_clear(cdev);
-	if (ret == -EBUSY) {
-		ccw_device_set_timeout(cdev, HZ/10);
-		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+	while (ret == -EBUSY) {
+		cdev->private->state = DEV_STATE_QUIESCE;
+		ret = ccw_device_cancel_halt_clear(cdev);
+		if (ret == -EBUSY) {
+			ccw_device_set_timeout(cdev, HZ/10);
+			spin_unlock_irq(sch->lock);
+			wait_event(cdev->private->wait_q,
+				   cdev->private->state != DEV_STATE_QUIESCE);
+			spin_lock_irq(sch->lock);
+		}
+		ret = cio_disable_subchannel(sch);
 	}
-	cio_disable_subchannel(sch);
+out_unlock:
+	spin_unlock_irq(sch->lock);
 }
 
-static int io_subchannel_get_status(struct subchannel *sch)
+static void io_subchannel_shutdown(struct subchannel *sch)
 {
-	struct schib schib;
-
-	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
-		return CIO_GONE;
-	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
-		return CIO_REVALIDATE;
-	if (!sch->lpm)
-		return CIO_NO_PATH;
-	return CIO_OPER;
+	io_subchannel_quiesce(sch);
 }
 
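io_subchannel_quiesce() relies on a common locking pattern: wait_event() may sleep, so the subchannel lock is dropped around the wait and re-taken before the loop condition is re-examined. A generic sketch of that pattern under stated assumptions (busy() and wq are placeholders, not part of the patch):

	spin_lock_irq(sch->lock);
	while (busy(sch)) {
		spin_unlock_irq(sch->lock);	/* never sleep under a spinlock */
		wait_event(wq, !busy(sch));
		spin_lock_irq(sch->lock);	/* re-take before re-checking */
	}
	spin_unlock_irq(sch->lock);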
 static int device_is_disconnected(struct ccw_device *cdev)
@@ -1576,20 +1276,16 @@ static void ccw_device_schedule_recovery(void)
 static int purge_fn(struct device *dev, void *data)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
-	struct ccw_device_private *priv = cdev->private;
-	int unreg;
+	struct ccw_dev_id *id = &cdev->private->dev_id;
 
 	spin_lock_irq(cdev->ccwlock);
-	unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
-		(priv->state == DEV_STATE_OFFLINE);
+	if (is_blacklisted(id->ssid, id->devno) &&
+	    (cdev->private->state == DEV_STATE_OFFLINE)) {
+		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+			      id->devno);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	}
 	spin_unlock_irq(cdev->ccwlock);
-	if (!unreg)
-		goto out;
-	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
-		      priv->dev_id.devno);
-	ccw_device_schedule_sch_unregister(cdev);
-
-out:
 	/* Abort loop in case of pending signal. */
 	if (signal_pending(current))
 		return -EINTR;
@@ -1609,7 +1305,7 @@ int ccw_purge_blacklisted(void)
 	return 0;
 }
 
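purge_fn() is written as a bus-iterator callback: returning -EINTR aborts the walk when a signal is pending. A sketch of the driving loop, assuming the standard bus_for_each_dev() helper and the ccw_bus_type that the real caller, ccw_purge_blacklisted() (partly outside this hunk), iterates over:

int purge_all_blacklisted(void)
{
	/* Stops early as soon as purge_fn() returns non-zero. */
	return bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
}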
-static void device_set_disconnected(struct ccw_device *cdev)
+void ccw_device_set_disconnected(struct ccw_device *cdev)
 {
 	if (!cdev)
 		return;
@@ -1631,91 +1327,169 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_NOT_OPER;
 }
 
-static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+enum io_sch_action {
+	IO_SCH_UNREG,
+	IO_SCH_ORPH_UNREG,
+	IO_SCH_ATTACH,
+	IO_SCH_UNREG_ATTACH,
+	IO_SCH_ORPH_ATTACH,
+	IO_SCH_REPROBE,
+	IO_SCH_VERIFY,
+	IO_SCH_DISC,
+	IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cio_update_schib(sch)) {
+		/* Not operational. */
+		if (!cdev)
+			return IO_SCH_UNREG;
+		if (!ccw_device_notify(cdev, CIO_GONE))
+			return IO_SCH_UNREG;
+		return IO_SCH_ORPH_UNREG;
+	}
+	/* Operational. */
+	if (!cdev)
+		return IO_SCH_ATTACH;
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		if (!ccw_device_notify(cdev, CIO_GONE))
+			return IO_SCH_UNREG_ATTACH;
+		return IO_SCH_ORPH_ATTACH;
+	}
+	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+		if (!ccw_device_notify(cdev, CIO_NO_PATH))
+			return IO_SCH_UNREG;
+		return IO_SCH_DISC;
+	}
+	if (device_is_disconnected(cdev))
+		return IO_SCH_REPROBE;
+	if (cdev->online)
+		return IO_SCH_VERIFY;
+	return IO_SCH_NOP;
+}
1375/**
1376 * io_subchannel_sch_event - process subchannel event
1377 * @sch: subchannel
1378 * @process: non-zero if function is called in process context
1379 *
1380 * An unspecified event occurred for this subchannel. Adjust data according
1381 * to the current operational state of the subchannel and device. Return
1382 * zero when the event has been handled sufficiently or -EAGAIN when this
1383 * function should be called again in process context.
1384 */
1385static int io_subchannel_sch_event(struct subchannel *sch, int process)
1635{ 1386{
1636 int event, ret, disc;
1637 unsigned long flags; 1387 unsigned long flags;
1638 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
1639 struct ccw_device *cdev; 1388 struct ccw_device *cdev;
1389 struct ccw_dev_id dev_id;
1390 enum io_sch_action action;
1391 int rc = -EAGAIN;
1640 1392
1641 spin_lock_irqsave(sch->lock, flags); 1393 spin_lock_irqsave(sch->lock, flags);
1394 if (!device_is_registered(&sch->dev))
1395 goto out_unlock;
1396 if (work_pending(&sch->todo_work))
1397 goto out_unlock;
1642 cdev = sch_get_cdev(sch); 1398 cdev = sch_get_cdev(sch);
1643 disc = device_is_disconnected(cdev); 1399 if (cdev && work_pending(&cdev->private->todo_work))
1644 if (disc && slow) { 1400 goto out_unlock;
1645 /* Disconnected devices are evaluated directly only.*/ 1401 action = sch_get_action(sch);
1646 spin_unlock_irqrestore(sch->lock, flags); 1402 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1647 return 0; 1403 sch->schid.ssid, sch->schid.sch_no, process,
1648 } 1404 action);
1649 /* No interrupt after machine check - kill pending timers. */ 1405 /* Perform immediate actions while holding the lock. */
1650 if (cdev) 1406 switch (action) {
1651 ccw_device_set_timeout(cdev, 0); 1407 case IO_SCH_REPROBE:
1652 if (!disc && !slow) { 1408 /* Trigger device recognition. */
1653 /* Non-disconnected devices are evaluated on the slow path. */ 1409 ccw_device_trigger_reprobe(cdev);
1654 spin_unlock_irqrestore(sch->lock, flags); 1410 rc = 0;
1655 return -EAGAIN; 1411 goto out_unlock;
1412 case IO_SCH_VERIFY:
1413 /* Trigger path verification. */
1414 io_subchannel_verify(sch);
1415 rc = 0;
1416 goto out_unlock;
1417 case IO_SCH_DISC:
1418 ccw_device_set_disconnected(cdev);
1419 rc = 0;
1420 goto out_unlock;
1421 case IO_SCH_ORPH_UNREG:
1422 case IO_SCH_ORPH_ATTACH:
1423 ccw_device_set_disconnected(cdev);
1424 break;
1425 case IO_SCH_UNREG_ATTACH:
1426 case IO_SCH_UNREG:
1427 if (cdev)
1428 ccw_device_set_notoper(cdev);
1429 break;
1430 case IO_SCH_NOP:
1431 rc = 0;
1432 goto out_unlock;
1433 default:
1434 break;
1656 } 1435 }
1657 event = io_subchannel_get_status(sch); 1436 spin_unlock_irqrestore(sch->lock, flags);
1658 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", 1437 /* All other actions require process context. */
1659 sch->schid.ssid, sch->schid.sch_no, event, 1438 if (!process)
1660 disc ? "disconnected" : "normal", 1439 goto out;
1661 slow ? "slow" : "fast"); 1440 /* Handle attached ccw device. */
1662 /* Analyze subchannel status. */ 1441 switch (action) {
1663 action = NONE; 1442 case IO_SCH_ORPH_UNREG:
1664 switch (event) { 1443 case IO_SCH_ORPH_ATTACH:
1665 case CIO_NO_PATH: 1444 /* Move ccw device to orphanage. */
1666 if (disc) { 1445 rc = ccw_device_move_to_orph(cdev);
1667 /* Check if paths have become available. */ 1446 if (rc)
1668 action = REPROBE; 1447 goto out;
1669 break;
1670 }
1671 /* fall through */
1672 case CIO_GONE:
1673 /* Ask driver what to do with device. */
1674 if (io_subchannel_notify(sch, event))
1675 action = DISC;
1676 else
1677 action = UNREGISTER;
1678 break; 1448 break;
1679 case CIO_REVALIDATE: 1449 case IO_SCH_UNREG_ATTACH:
1680 /* Device will be removed, so no notify necessary. */ 1450 /* Unregister ccw device. */
1681 if (disc) 1451 ccw_device_unregister(cdev);
1682 /* Reprobe because immediate unregister might block. */
1683 action = REPROBE;
1684 else
1685 action = UNREGISTER_PROBE;
1686 break; 1452 break;
1687 case CIO_OPER: 1453 default:
1688 if (disc)
1689 /* Get device operational again. */
1690 action = REPROBE;
1691 break; 1454 break;
1692 } 1455 }
1693 /* Perform action. */ 1456 /* Handle subchannel. */
1694 ret = 0;
1695 switch (action) { 1457 switch (action) {
1696 case UNREGISTER: 1458 case IO_SCH_ORPH_UNREG:
1697 case UNREGISTER_PROBE: 1459 case IO_SCH_UNREG:
1698 ccw_device_set_notoper(cdev);
1699 /* Unregister device (will use subchannel lock). */
1700 spin_unlock_irqrestore(sch->lock, flags);
1701 css_sch_device_unregister(sch); 1460 css_sch_device_unregister(sch);
1702 spin_lock_irqsave(sch->lock, flags);
1703 break; 1461 break;
1704 case REPROBE: 1462 case IO_SCH_ORPH_ATTACH:
1463 case IO_SCH_UNREG_ATTACH:
1464 case IO_SCH_ATTACH:
1465 dev_id.ssid = sch->schid.ssid;
1466 dev_id.devno = sch->schib.pmcw.dev;
1467 cdev = get_ccwdev_by_dev_id(&dev_id);
1468 if (!cdev) {
1469 sch_create_and_recog_new_device(sch);
1470 break;
1471 }
1472 rc = ccw_device_move_to_sch(cdev, sch);
1473 if (rc) {
1474 /* Release reference from get_ccwdev_by_dev_id() */
1475 put_device(&cdev->dev);
1476 goto out;
1477 }
1478 spin_lock_irqsave(sch->lock, flags);
1705 ccw_device_trigger_reprobe(cdev); 1479 ccw_device_trigger_reprobe(cdev);
1706 break; 1480 spin_unlock_irqrestore(sch->lock, flags);
1707 case DISC: 1481 /* Release reference from get_ccwdev_by_dev_id() */
1708 device_set_disconnected(cdev); 1482 put_device(&cdev->dev);
1709 break; 1483 break;
1710 default: 1484 default:
1711 break; 1485 break;
1712 } 1486 }
1713 spin_unlock_irqrestore(sch->lock, flags); 1487 return 0;
1714 /* Probe if necessary. */
1715 if (action == UNREGISTER_PROBE)
1716 ret = css_probe_device(sch->schid);
1717 1488
1718 return ret; 1489out_unlock:
1490 spin_unlock_irqrestore(sch->lock, flags);
1491out:
1492 return rc;
1719} 1493}
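The -EAGAIN contract documented in the kerneldoc above implies a two-phase caller. A hedged sketch (handle_sch_event() is hypothetical; the real entry point lives in the css event-handling code outside this file):

static void handle_sch_event(struct subchannel *sch)
{
	/* First attempt may run without process context (process=0). */
	if (io_subchannel_sch_event(sch, 0) == -EAGAIN)
		/* Defer; the evaluation worker retries with process=1. */
		css_schedule_eval(sch->schid);
}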
1720 1494
1721#ifdef CONFIG_CCW_CONSOLE 1495#ifdef CONFIG_CCW_CONSOLE
@@ -1745,10 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
 	sch->driver = &io_subchannel_driver;
 	/* Initialize the ccw_device structure. */
 	cdev->dev.parent= &sch->dev;
-	rc = io_subchannel_recog(cdev, sch);
-	if (rc)
-		return rc;
-
+	io_subchannel_recog(cdev, sch);
 	/* Now wait for the async. recognition to come to an end. */
 	spin_lock_irq(cdev->ccwlock);
 	while (!dev_fsm_final_state(cdev))
@@ -1764,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
 	rc = 0;
 out_unlock:
 	spin_unlock_irq(cdev->ccwlock);
-	return 0;
+	return rc;
 }
 
 struct ccw_device *
@@ -1920,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 
-	if (work_pending(&cdev->private->kick_work))
+	if (work_pending(&cdev->private->todo_work))
 		return -EAGAIN;
 	/* Fail while device is being set online/offline. */
 	if (atomic_read(&cdev->private->onoff))
@@ -2006,7 +1777,6 @@ static int ccw_device_pm_thaw(struct device *dev)
 static void __ccw_device_pm_restore(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-	int ret;
 
 	if (cio_is_console(sch->schid))
 		goto out;
@@ -2016,22 +1786,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
 	 */
 	spin_lock_irq(sch->lock);
 	cdev->private->flags.resuming = 1;
-	ret = ccw_device_recognition(cdev);
+	ccw_device_recognition(cdev);
 	spin_unlock_irq(sch->lock);
-	if (ret) {
-		CIO_MSG_EVENT(0, "Couldn't start recognition for device "
-			      "0.%x.%04x (ret=%d)\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno, ret);
-		spin_lock_irq(sch->lock);
-		cdev->private->state = DEV_STATE_DISCONNECTED;
-		spin_unlock_irq(sch->lock);
-		/* notify driver after the resume cb */
-		goto out;
-	}
 	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
 		   cdev->private->state == DEV_STATE_DISCONNECTED);
-
 out:
 	cdev->private->flags.resuming = 0;
 }
@@ -2041,7 +1799,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_BOXED;
 	if (ccw_device_notify(cdev, CIO_BOXED))
 		return 0;
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
 }
 
@@ -2050,7 +1808,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_DISCONNECTED;
 	if (ccw_device_notify(cdev, CIO_GONE))
 		return 0;
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
 }
 
@@ -2095,9 +1853,7 @@ static int ccw_device_pm_restore(struct device *dev)
 	/* check if the device type has changed */
 	if (!ccw_device_test_sense_data(cdev)) {
 		ccw_device_update_sense_data(cdev);
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unbind_bind);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 		ret = -ENODEV;
 		goto out_unlock;
 	}
@@ -2141,14 +1897,14 @@ out_disc_unlock:
 	goto out_restore;
 
 out_unreg_unlock:
-	ccw_device_schedule_sch_unregister(cdev);
+	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
 	ret = -ENODEV;
 out_unlock:
 	spin_unlock_irq(sch->lock);
 	return ret;
 }
 
-static struct dev_pm_ops ccw_pm_ops = {
+static const struct dev_pm_ops ccw_pm_ops = {
 	.prepare = ccw_device_pm_prepare,
 	.complete = ccw_device_pm_complete,
 	.freeze = ccw_device_pm_freeze,
@@ -2206,6 +1962,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
 	return sch->schid;
 }
 
+static void ccw_device_todo(struct work_struct *work)
+{
+	struct ccw_device_private *priv;
+	struct ccw_device *cdev;
+	struct subchannel *sch;
+	enum cdev_todo todo;
+
+	priv = container_of(work, struct ccw_device_private, todo_work);
+	cdev = priv->cdev;
+	sch = to_subchannel(cdev->dev.parent);
+	/* Find out todo. */
+	spin_lock_irq(cdev->ccwlock);
+	todo = priv->todo;
+	priv->todo = CDEV_TODO_NOTHING;
+	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
+		      priv->dev_id.ssid, priv->dev_id.devno, todo);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Perform todo. */
+	switch (todo) {
+	case CDEV_TODO_ENABLE_CMF:
+		cmf_reenable(cdev);
+		break;
+	case CDEV_TODO_REBIND:
+		ccw_device_do_unbind_bind(cdev);
+		break;
+	case CDEV_TODO_REGISTER:
+		io_subchannel_register(cdev);
+		break;
+	case CDEV_TODO_UNREG_EVAL:
+		if (!sch_is_pseudo_sch(sch))
+			css_schedule_eval(sch->schid);
+		/* fall-through */
+	case CDEV_TODO_UNREG:
+		if (sch_is_pseudo_sch(sch))
+			ccw_device_unregister(cdev);
+		else
+			ccw_device_call_sch_unregister(cdev);
+		break;
+	default:
+		break;
+	}
+	/* Release workqueue ref. */
+	put_device(&cdev->dev);
+}
+
+/**
+ * ccw_device_sched_todo - schedule ccw device operation
+ * @cdev: ccw device
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with ccwdev lock held.
+ */
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
+{
+	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
+		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
+		      todo);
+	if (cdev->private->todo >= todo)
+		return;
+	cdev->private->todo = todo;
+	/* Get workqueue ref. */
+	if (!get_device(&cdev->dev))
+		return;
+	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&cdev->dev);
+	}
+}
+
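A hedged usage sketch for ccw_device_sched_todo(): per the kerneldoc above it must be called with the ccwdev lock held, for example when deciding that an offline device should be unregistered:

	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->state == DEV_STATE_OFFLINE)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irq(cdev->ccwlock);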
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccw_device_set_online);
 EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index ed39a2caaf47..bcfe13e42638 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
 	DEV_STATE_DISBAND_PGID,
 	DEV_STATE_BOXED,
 	/* states to wait for i/o completion before doing something */
-	DEV_STATE_CLEAR_VERIFY,
 	DEV_STATE_TIMEOUT_KILL,
 	DEV_STATE_QUIESCE,
 	/* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
 	DEV_STATE_DISCONNECTED_SENSE_ID,
 	DEV_STATE_CMFCHANGE,
 	DEV_STATE_CMFUPDATE,
+	DEV_STATE_STEAL_LOCK,
 	/* last element! */
 	NR_DEV_STATES
 };
@@ -81,17 +81,16 @@ void io_subchannel_init_config(struct subchannel *sch);
 
 int ccw_device_cancel_halt_clear(struct ccw_device *);
 
-void ccw_device_do_unbind_bind(struct work_struct *);
-void ccw_device_move_to_orphanage(struct work_struct *);
 int ccw_device_is_orphan(struct ccw_device *);
 
-int ccw_device_recognition(struct ccw_device *);
+void ccw_device_recognition(struct ccw_device *);
 int ccw_device_online(struct ccw_device *);
 int ccw_device_offline(struct ccw_device *);
 void ccw_device_update_sense_data(struct ccw_device *);
 int ccw_device_test_sense_data(struct ccw_device *);
 void ccw_device_schedule_sch_unregister(struct ccw_device *);
 int ccw_purge_blacklisted(void);
+void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
 
 /* Function prototypes for device status and basic sense stuff. */
 void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +98,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
 int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
 int ccw_device_do_sense(struct ccw_device *, struct irb *);
 
+/* Function prototypes for internal request handling. */
+int lpm_adjust(int lpm, int mask);
+void ccw_request_start(struct ccw_device *);
+int ccw_request_cancel(struct ccw_device *cdev);
+void ccw_request_handler(struct ccw_device *cdev);
+void ccw_request_timeout(struct ccw_device *cdev);
+void ccw_request_notoper(struct ccw_device *cdev);
+
 /* Function prototypes for sense id stuff. */
 void ccw_device_sense_id_start(struct ccw_device *);
-void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
 void ccw_device_sense_id_done(struct ccw_device *, int);
 
 /* Function prototypes for path grouping stuff. */
-void ccw_device_sense_pgid_start(struct ccw_device *);
-void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
-void ccw_device_sense_pgid_done(struct ccw_device *, int);
-
 void ccw_device_verify_start(struct ccw_device *);
-void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
 void ccw_device_verify_done(struct ccw_device *, int);
 
 void ccw_device_disband_start(struct ccw_device *);
-void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
 void ccw_device_disband_done(struct ccw_device *, int);
 
-void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
-void ccw_device_stlck_done(struct ccw_device *, void *, int);
-
 int ccw_device_call_handler(struct ccw_device *);
 
 int ccw_device_stlck(struct ccw_device *);
@@ -125,6 +128,7 @@ int ccw_device_stlck(struct ccw_device *);
 void ccw_device_trigger_reprobe(struct ccw_device *);
 void ccw_device_kill_io(struct ccw_device *);
 int ccw_device_notify(struct ccw_device *, int);
+void ccw_device_set_disconnected(struct ccw_device *cdev);
 void ccw_device_set_notoper(struct ccw_device *cdev);
 
 /* qdio needs this. */
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e728ce447f6e..ae760658a131 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 
 	sch = to_subchannel(cdev->dev.parent);
 
-	ccw_device_set_timeout(cdev, 0);
-	cio_disable_subchannel(sch);
+	if (cio_disable_subchannel(sch))
+		state = DEV_STATE_NOT_OPER;
 	/*
 	 * Now that we tried recognition, we have performed device selection
 	 * through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	}
 	switch (state) {
 	case DEV_STATE_NOT_OPER:
-		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
-			      "subchannel 0.%x.%04x\n",
-			      cdev->private->dev_id.devno,
-			      sch->schid.ssid, sch->schid.sch_no);
 		break;
 	case DEV_STATE_OFFLINE:
 		if (!cdev->online) {
 			ccw_device_update_sense_data(cdev);
-			/* Issue device info message. */
-			CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
-				      "CU Type/Mod = %04X/%02X, Dev Type/Mod "
-				      "= %04X/%02X\n",
-				      cdev->private->dev_id.ssid,
-				      cdev->private->dev_id.devno,
-				      cdev->id.cu_type, cdev->id.cu_model,
-				      cdev->id.dev_type, cdev->id.dev_model);
 			break;
 		}
 		cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		wake_up(&cdev->private->wait_q);
 	} else {
 		ccw_device_update_sense_data(cdev);
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_do_unbind_bind);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 	}
 	return;
 case DEV_STATE_BOXED:
-	CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
-		      " subchannel 0.%x.%04x\n",
-		      cdev->private->dev_id.devno,
-		      sch->schid.ssid, sch->schid.sch_no);
 	if (cdev->id.cu_type != 0) { /* device was recognized before */
 		cdev->private->flags.recog_done = 1;
 		cdev->private->state = DEV_STATE_BOXED;
@@ -343,28 +325,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
 	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
 }
 
-static void cmf_reenable_delayed(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	cmf_reenable(cdev);
-}
-
 static void ccw_device_oper_notify(struct ccw_device *cdev)
 {
 	if (ccw_device_notify(cdev, CIO_OPER)) {
 		/* Reenable channel measurements, if needed. */
-		PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed);
-		queue_work(ccw_device_work, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
 		return;
 	}
 	/* Driver doesn't want device back. */
 	ccw_device_set_notoper(cdev);
-	PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind);
-	queue_work(ccw_device_work, &cdev->private->kick_work);
+	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
 }
 
 /*
@@ -387,19 +357,35 @@ ccw_device_done(struct ccw_device *cdev, int state)
 
 	cdev->private->state = state;
 
-	if (state == DEV_STATE_BOXED) {
+	switch (state) {
+	case DEV_STATE_BOXED:
 		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
 		if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
-			ccw_device_schedule_sch_unregister(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		cdev->private->flags.donotify = 0;
-	}
-	if (state == DEV_STATE_NOT_OPER) {
+		break;
+	case DEV_STATE_NOT_OPER:
 		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
 		if (!ccw_device_notify(cdev, CIO_GONE))
-			ccw_device_schedule_sch_unregister(cdev);
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		else
+			ccw_device_set_disconnected(cdev);
 		cdev->private->flags.donotify = 0;
+		break;
+	case DEV_STATE_DISCONNECTED:
+		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
+			      "%04x\n", cdev->private->dev_id.devno,
+			      sch->schid.sch_no);
+		if (!ccw_device_notify(cdev, CIO_NO_PATH))
+			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		else
+			ccw_device_set_disconnected(cdev);
+		cdev->private->flags.donotify = 0;
+		break;
+	default:
+		break;
 	}
 
 	if (cdev->private->flags.donotify) {
@@ -409,107 +395,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	wake_up(&cdev->private->wait_q);
 }
 
-static int cmp_pgid(struct pgid *p1, struct pgid *p2)
-{
-	char *c1;
-	char *c2;
-
-	c1 = (char *)p1;
-	c2 = (char *)p2;
-
-	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
-}
-
-static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
-{
-	int i;
-	int last;
-
-	last = 0;
-	for (i = 0; i < 8; i++) {
-		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
-			/* No PGID yet */
-			continue;
-		if (cdev->private->pgid[last].inf.ps.state1 ==
-		    SNID_STATE1_RESET) {
-			/* First non-zero PGID */
-			last = i;
-			continue;
-		}
-		if (cmp_pgid(&cdev->private->pgid[i],
-			     &cdev->private->pgid[last]) == 0)
-			/* Non-conflicting PGIDs */
-			continue;
-
-		/* PGID mismatch, can't pathgroup. */
-		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
-			      "0.%x.%04x, can't pathgroup\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno);
-		cdev->private->options.pgroup = 0;
-		return;
-	}
-	if (cdev->private->pgid[last].inf.ps.state1 ==
-	    SNID_STATE1_RESET)
-		/* No previous pgid found */
-		memcpy(&cdev->private->pgid[0],
-		       &channel_subsystems[0]->global_pgid,
-		       sizeof(struct pgid));
-	else
-		/* Use existing pgid */
-		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
-		       sizeof(struct pgid));
-}
-
-/*
- * Function called from device_pgid.c after sense path group id has completed.
- */
-void
-ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
-{
-	struct subchannel *sch;
-
-	sch = to_subchannel(cdev->dev.parent);
-	switch (err) {
-	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
-		cdev->private->options.pgroup = 0;
-		break;
-	case 0: /* success */
-	case -EACCES: /* partial success, some paths not operational */
-		/* Check if all pgids are equal or 0. */
-		__ccw_device_get_common_pgid(cdev);
-		break;
-	case -ETIME: /* Sense path group id stopped by timeout. */
-	case -EUSERS: /* device is reserved for someone else. */
-		ccw_device_done(cdev, DEV_STATE_BOXED);
-		return;
-	default:
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		return;
-	}
-	/* Start Path Group verification. */
-	cdev->private->state = DEV_STATE_VERIFY;
-	cdev->private->flags.doverify = 0;
-	ccw_device_verify_start(cdev);
-}
-
 /*
  * Start device recognition.
  */
-int
-ccw_device_recognition(struct ccw_device *cdev)
+void ccw_device_recognition(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
-	if (ret != 0)
-		/* Couldn't enable the subchannel for i/o. Sick device. */
-		return ret;
-
-	/* After 60s the device recognition is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 
 	/*
 	 * We used to start here with a sense pgid to find out whether a device
@@ -521,32 +412,33 @@ ccw_device_recognition(struct ccw_device *cdev)
 	 */
 	cdev->private->flags.recog_done = 0;
 	cdev->private->state = DEV_STATE_SENSE_ID;
+	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
+		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+		return;
+	}
 	ccw_device_sense_id_start(cdev);
-	return 0;
 }
 
 /*
- * Handle timeout in device recognition.
+ * Handle events for states that use the ccw request infrastructure.
  */
-static void
-ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
 {
-	int ret;
-
-	ret = ccw_device_cancel_halt_clear(cdev);
-	switch (ret) {
-	case 0:
-		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
+	switch (e) {
+	case DEV_EVENT_NOTOPER:
+		ccw_request_notoper(cdev);
 		break;
-	case -ENODEV:
-		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
+	case DEV_EVENT_INTERRUPT:
+		ccw_request_handler(cdev);
+		break;
+	case DEV_EVENT_TIMEOUT:
+		ccw_request_timeout(cdev);
 		break;
 	default:
-		ccw_device_set_timeout(cdev, 3*HZ);
+		break;
 	}
 }
 
-
 void
 ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
@@ -555,21 +447,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 	sch = to_subchannel(cdev->dev.parent);
 	/* Update schib - pom may have changed. */
 	if (cio_update_schib(sch)) {
-		cdev->private->flags.donotify = 0;
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		return;
+		err = -ENODEV;
+		goto callback;
 	}
 	/* Update lpm with verified path mask. */
 	sch->lpm = sch->vpm;
 	/* Repeat path verification? */
 	if (cdev->private->flags.doverify) {
-		cdev->private->flags.doverify = 0;
 		ccw_device_verify_start(cdev);
 		return;
 	}
+callback:
 	switch (err) {
-	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
-		cdev->private->options.pgroup = 0;
 	case 0:
 		ccw_device_done(cdev, DEV_STATE_ONLINE);
 		/* Deliver fake irb to device driver, if needed. */
@@ -588,18 +477,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 		}
 		break;
 	case -ETIME:
+	case -EUSERS:
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
 		ccw_device_done(cdev, DEV_STATE_BOXED);
 		break;
+	case -EACCES:
+		/* Reset oper notify indication after verify error. */
+		cdev->private->flags.donotify = 0;
+		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
+		break;
 	default:
 		/* Reset oper notify indication after verify error. */
 		cdev->private->flags.donotify = 0;
-		if (cdev->online) {
-			ccw_device_set_timeout(cdev, 0);
-			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
-		} else
-			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
 	}
 }
@@ -624,17 +515,9 @@ ccw_device_online(struct ccw_device *cdev)
 		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 		return ret;
 	}
-	/* Do we want to do path grouping? */
-	if (!cdev->private->options.pgroup) {
-		/* Start initial path verification. */
-		cdev->private->state = DEV_STATE_VERIFY;
-		cdev->private->flags.doverify = 0;
-		ccw_device_verify_start(cdev);
-		return 0;
-	}
-	/* Do a SensePGID first. */
-	cdev->private->state = DEV_STATE_SENSE_PGID;
-	ccw_device_sense_pgid_start(cdev);
+	/* Start initial path verification. */
+	cdev->private->state = DEV_STATE_VERIFY;
+	ccw_device_verify_start(cdev);
 	return 0;
 }
 
@@ -650,7 +533,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
 		break;
 	default:
 		cdev->private->flags.donotify = 0;
-		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		break;
 	}
@@ -671,6 +553,10 @@ ccw_device_offline(struct ccw_device *cdev)
 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
 		return 0;
 	}
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		ccw_device_done(cdev, DEV_STATE_BOXED);
+		return 0;
+	}
 	if (ccw_device_is_orphan(cdev)) {
 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
 		return 0;
@@ -683,7 +569,7 @@ ccw_device_offline(struct ccw_device *cdev)
 	if (cdev->private->state != DEV_STATE_ONLINE)
 		return -EINVAL;
 	/* Are we doing path grouping? */
-	if (!cdev->private->options.pgroup) {
+	if (!cdev->private->flags.pgroup) {
 		/* No, set state offline immediately. */
 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
 		return 0;
@@ -695,46 +581,15 @@ ccw_device_offline(struct ccw_device *cdev)
 }
 
 /*
- * Handle timeout in device online/offline process.
- */
-static void
-ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	int ret;
-
-	ret = ccw_device_cancel_halt_clear(cdev);
-	switch (ret) {
-	case 0:
-		ccw_device_done(cdev, DEV_STATE_BOXED);
-		break;
-	case -ENODEV:
-		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
-		break;
-	default:
-		ccw_device_set_timeout(cdev, 3*HZ);
-	}
-}
-
-/*
- * Handle not oper event in device recognition.
- */
-static void
-ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
-}
-
-/*
  * Handle not operational event in non-special state.
  */
 static void ccw_device_generic_notoper(struct ccw_device *cdev,
 				       enum dev_event dev_event)
 {
-	struct subchannel *sch;
-
-	ccw_device_set_notoper(cdev);
-	sch = to_subchannel(cdev->dev.parent);
-	css_schedule_eval(sch->schid);
+	if (!ccw_device_notify(cdev, CIO_GONE))
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	else
+		ccw_device_set_disconnected(cdev);
 }
 
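The notify-else-disconnect pattern used in ccw_device_generic_notoper() also appears in the reworked ccw_device_done() cases above. Read as one helper, a sketch (gone_or_disconnected() is illustrative only, not part of the patch):

static void gone_or_disconnected(struct ccw_device *cdev, int event)
{
	/* Ask the driver; if it declines, unregister the device, */
	if (!ccw_device_notify(cdev, event))
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		/* otherwise park it as disconnected for later recovery. */
		ccw_device_set_disconnected(cdev);
}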
740/* 595/*
@@ -783,11 +638,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	/* Device is idle, we can do the path verification. */
 	cdev->private->state = DEV_STATE_VERIFY;
-	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
 /*
+ * Handle path verification event in boxed state.
+ */
+static void ccw_device_boxed_verify(struct ccw_device *cdev,
+				    enum dev_event dev_event)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+
+	if (cdev->online) {
+		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
+			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
+		else
+			ccw_device_online_verify(cdev, dev_event);
+	} else
+		css_schedule_eval(sch->schid);
+}
+
+/*
  * Got an interrupt for a normal io (state online).
  */
 static void
@@ -885,12 +756,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 	 */
 	if (scsw_fctl(&irb->scsw) &
 	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
-		/* Retry Basic Sense if requested. */
-		if (cdev->private->flags.intretry) {
-			cdev->private->flags.intretry = 0;
-			ccw_device_do_sense(cdev, irb);
-			return;
-		}
 		cdev->private->flags.dosense = 0;
 		memset(&cdev->private->irb, 0, sizeof(struct irb));
 		ccw_device_accumulate_irb(cdev, irb);
@@ -914,21 +779,6 @@ call_handler:
 }
 
 static void
-ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-
-	irb = (struct irb *) __LC_IRB;
-	/* Accumulate status. We don't do basic sense. */
-	ccw_device_accumulate_irb(cdev, irb);
-	/* Remember to clear irb to avoid residuals. */
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	/* Try to start delayed device verification. */
-	ccw_device_online_verify(cdev, 0);
-	/* Note: Don't call handler for cio initiated clear! */
-}
-
-static void
 ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct subchannel *sch;
@@ -985,32 +835,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 }
 
 static void
-ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
-{
-	struct irb *irb;
-
-	switch (dev_event) {
-	case DEV_EVENT_INTERRUPT:
-		irb = (struct irb *) __LC_IRB;
-		/* Check for unsolicited interrupt. */
-		if ((scsw_stctl(&irb->scsw) ==
-		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
-		    (!scsw_cc(&irb->scsw)))
-			/* FIXME: we should restart stlck here, but this
-			 * is extremely unlikely ... */
-			goto out_wakeup;
-
-		ccw_device_accumulate_irb(cdev, irb);
-		/* We don't care about basic sense etc. */
-		break;
-	default: /* timeout */
-		break;
-	}
-out_wakeup:
-	wake_up(&cdev->private->wait_q);
-}
-
-static void
 ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct subchannel *sch;
@@ -1019,10 +843,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
 	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
 		/* Couldn't enable the subchannel for i/o. Sick device. */
 		return;
-
-	/* After 60s the device recognition is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
-
 	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
 	ccw_device_sense_id_start(cdev);
 }
@@ -1053,22 +873,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
 
 	/* We should also update ssd info, but this has to wait. */
 	/* Check if this is another device which appeared on the same sch. */
-	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_move_to_orphanage);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-	} else
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
+		css_schedule_eval(sch->schid);
+	else
 		ccw_device_start_id(cdev, 0);
 }
 
-static void
-ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void ccw_device_disabled_irq(struct ccw_device *cdev,
+				    enum dev_event dev_event)
 {
 	struct subchannel *sch;
 
 	sch = to_subchannel(cdev->dev.parent);
 	/*
-	 * An interrupt in state offline means a previous disable was not
+	 * An interrupt in a disabled state means a previous disable was not
 	 * successful - should not happen, but we try to disable again.
 	 */
 	cio_disable_subchannel(sch);
@@ -1094,10 +912,7 @@ static void
1094ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 912ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1095{ 913{
1096 ccw_device_set_timeout(cdev, 0); 914 ccw_device_set_timeout(cdev, 0);
1097 if (dev_event == DEV_EVENT_NOTOPER) 915 cdev->private->state = DEV_STATE_NOT_OPER;
1098 cdev->private->state = DEV_STATE_NOT_OPER;
1099 else
1100 cdev->private->state = DEV_STATE_OFFLINE;
1101 wake_up(&cdev->private->wait_q); 916 wake_up(&cdev->private->wait_q);
1102} 917}
1103 918
@@ -1107,17 +922,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1107 int ret; 922 int ret;
1108 923
1109 ret = ccw_device_cancel_halt_clear(cdev); 924 ret = ccw_device_cancel_halt_clear(cdev);
1110 switch (ret) { 925 if (ret == -EBUSY) {
1111 case 0: 926 ccw_device_set_timeout(cdev, HZ/10);
1112 cdev->private->state = DEV_STATE_OFFLINE; 927 } else {
1113 wake_up(&cdev->private->wait_q);
1114 break;
1115 case -ENODEV:
1116 cdev->private->state = DEV_STATE_NOT_OPER; 928 cdev->private->state = DEV_STATE_NOT_OPER;
1117 wake_up(&cdev->private->wait_q); 929 wake_up(&cdev->private->wait_q);
1118 break;
1119 default:
1120 ccw_device_set_timeout(cdev, HZ/10);
1121 } 930 }
1122} 931}
1123 932
@@ -1131,50 +940,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1131} 940}
1132 941
1133/* 942/*
1134 * Bug operation action.
1135 */
1136static void
1137ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1138{
1139 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1140 "0.%x.%04x\n", cdev->private->state, dev_event,
1141 cdev->private->dev_id.ssid,
1142 cdev->private->dev_id.devno);
1143 BUG();
1144}
1145
1146/*
1147 * device state machine 943 * device state machine
1148 */ 944 */
1149fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 945fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1150 [DEV_STATE_NOT_OPER] = { 946 [DEV_STATE_NOT_OPER] = {
1151 [DEV_EVENT_NOTOPER] = ccw_device_nop, 947 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1152 [DEV_EVENT_INTERRUPT] = ccw_device_bug, 948 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1153 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 949 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1154 [DEV_EVENT_VERIFY] = ccw_device_nop, 950 [DEV_EVENT_VERIFY] = ccw_device_nop,
1155 }, 951 },
1156 [DEV_STATE_SENSE_PGID] = { 952 [DEV_STATE_SENSE_PGID] = {
1157 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 953 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1158 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, 954 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1159 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 955 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1160 [DEV_EVENT_VERIFY] = ccw_device_nop, 956 [DEV_EVENT_VERIFY] = ccw_device_nop,
1161 }, 957 },
1162 [DEV_STATE_SENSE_ID] = { 958 [DEV_STATE_SENSE_ID] = {
1163 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 959 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1164 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 960 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1165 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 961 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1166 [DEV_EVENT_VERIFY] = ccw_device_nop, 962 [DEV_EVENT_VERIFY] = ccw_device_nop,
1167 }, 963 },
1168 [DEV_STATE_OFFLINE] = { 964 [DEV_STATE_OFFLINE] = {
1169 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 965 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1170 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 966 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1171 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 967 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1172 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 968 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1173 }, 969 },
1174 [DEV_STATE_VERIFY] = { 970 [DEV_STATE_VERIFY] = {
1175 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 971 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1176 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 972 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1177 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 973 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1178 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 974 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1179 }, 975 },
1180 [DEV_STATE_ONLINE] = { 976 [DEV_STATE_ONLINE] = {
@@ -1190,24 +986,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1190 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 986 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1191 }, 987 },
1192 [DEV_STATE_DISBAND_PGID] = { 988 [DEV_STATE_DISBAND_PGID] = {
1193 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 989 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1194 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, 990 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1195 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 991 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1196 [DEV_EVENT_VERIFY] = ccw_device_nop, 992 [DEV_EVENT_VERIFY] = ccw_device_nop,
1197 }, 993 },
1198 [DEV_STATE_BOXED] = { 994 [DEV_STATE_BOXED] = {
1199 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 995 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1200 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, 996 [DEV_EVENT_INTERRUPT] = ccw_device_nop,
1201 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1202 [DEV_EVENT_VERIFY] = ccw_device_nop,
1203 },
1204 /* states to wait for i/o completion before doing something */
1205 [DEV_STATE_CLEAR_VERIFY] = {
1206 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1207 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1208 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 997 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1209 [DEV_EVENT_VERIFY] = ccw_device_nop, 998 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
1210 }, 999 },
1000 /* states to wait for i/o completion before doing something */
1211 [DEV_STATE_TIMEOUT_KILL] = { 1001 [DEV_STATE_TIMEOUT_KILL] = {
1212 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1002 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1213 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1003 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1224,13 +1014,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1224 [DEV_STATE_DISCONNECTED] = { 1014 [DEV_STATE_DISCONNECTED] = {
1225 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1015 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1226 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1016 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1227 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1017 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1228 [DEV_EVENT_VERIFY] = ccw_device_start_id, 1018 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1229 }, 1019 },
1230 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1020 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1231 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1021 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1232 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 1022 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1233 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 1023 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1234 [DEV_EVENT_VERIFY] = ccw_device_nop, 1024 [DEV_EVENT_VERIFY] = ccw_device_nop,
1235 }, 1025 },
1236 [DEV_STATE_CMFCHANGE] = { 1026 [DEV_STATE_CMFCHANGE] = {
@@ -1245,6 +1035,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1245 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, 1035 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
1246 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, 1036 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
1247 }, 1037 },
1038 [DEV_STATE_STEAL_LOCK] = {
1039 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1040 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1041 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1042 [DEV_EVENT_VERIFY] = ccw_device_nop,
1043 },
1248}; 1044};
1249 1045
1250EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1046EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
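For orientation: the jumptable above is the whole of the device state machine, and it is consumed by a one-line dispatcher, so every state/event pair resolves to exactly one handler and "impossible" combinations fall through to ccw_device_nop(). A minimal sketch of that dispatch (it mirrors the dev_fsm_event() helper declared in device.h):

static inline void dev_fsm_event(struct ccw_device *cdev,
				 enum dev_event dev_event)
{
	/* Index by current state and incoming event; the table is
	 * fully populated, so no NULL check is needed. */
	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}

The payoff of this patch is visible in the table itself: most request-driven states now route NOTOPER, INTERRUPT and TIMEOUT to the common ccw_device_request_event() handler instead of per-state IRQ and timeout code.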
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34f..78a0b43862c5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
1/* 1/*
2 * drivers/s390/cio/device_id.c 2 * CCW device SENSE ID I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Sense ID functions.
10 */ 8 */
11 9
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h> 10#include <linux/kernel.h>
15 11#include <linux/string.h>
12#include <linux/types.h>
13#include <linux/errno.h>
16#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
17#include <asm/delay.h> 15#include <asm/setup.h>
18#include <asm/cio.h> 16#include <asm/cio.h>
19#include <asm/lowcore.h>
20#include <asm/diag.h> 17#include <asm/diag.h>
21 18
22#include "cio.h" 19#include "cio.h"
23#include "cio_debug.h" 20#include "cio_debug.h"
24#include "css.h"
25#include "device.h" 21#include "device.h"
26#include "ioasm.h"
27#include "io_sch.h" 22#include "io_sch.h"
28 23
24#define SENSE_ID_RETRIES 256
25#define SENSE_ID_TIMEOUT (10 * HZ)
26#define SENSE_ID_MIN_LEN 4
27#define SENSE_ID_BASIC_LEN 7
28
29/** 29/**
30 * vm_vdev_to_cu_type - Convert vm virtual device into control unit type 30 * diag210_to_senseid - convert diag 0x210 data to sense id information
31 * for certain devices. 31 * @senseid: sense id
32 * @class: virtual device class 32 * @diag: diag 0x210 data
33 * @type: virtual device type
34 * 33 *
35 * Returns control unit type if a match was made or %0xffff otherwise. 34 * Return 0 on success, non-zero otherwise.
36 */ 35 */
37static int vm_vdev_to_cu_type(int class, int type) 36static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
38{ 37{
39 static struct { 38 static struct {
40 int class, type, cu_type; 39 int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
71 }; 70 };
72 int i; 71 int i;
73 72
74 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 73 /* Special case for osa devices. */
75 if (class == vm_devices[i].class && type == vm_devices[i].type) 74 if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
76 return vm_devices[i].cu_type; 75 senseid->cu_type = 0x3088;
76 senseid->cu_model = 0x60;
77 senseid->reserved = 0xff;
78 return 0;
79 }
80 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
81 if (diag->vrdcvcla == vm_devices[i].class &&
82 diag->vrdcvtyp == vm_devices[i].type) {
83 senseid->cu_type = vm_devices[i].cu_type;
84 senseid->reserved = 0xff;
85 return 0;
86 }
87 }
77 88
78 return 0xffff; 89 return -ENODEV;
79} 90}
80 91
81/** 92/**
82 * diag_get_dev_info - retrieve device information via DIAG X'210' 93 * diag_get_dev_info - retrieve device information via diag 0x210
83 * @devno: device number 94 * @cdev: ccw device
84 * @ps: pointer to sense ID data area
85 * 95 *
86 * Returns zero on success, non-zero otherwise. 96 * Returns zero on success, non-zero otherwise.
87 */ 97 */
88static int diag_get_dev_info(u16 devno, struct senseid *ps) 98static int diag210_get_dev_info(struct ccw_device *cdev)
89{ 99{
100 struct ccw_dev_id *dev_id = &cdev->private->dev_id;
101 struct senseid *senseid = &cdev->private->senseid;
90 struct diag210 diag_data; 102 struct diag210 diag_data;
91 int ccode; 103 int rc;
92 104
93 CIO_TRACE_EVENT (4, "VMvdinf"); 105 if (dev_id->ssid != 0)
94 106 return -ENODEV;
95 diag_data = (struct diag210) { 107 memset(&diag_data, 0, sizeof(diag_data));
96 .vrdcdvno = devno, 108 diag_data.vrdcdvno = dev_id->devno;
97 .vrdclen = sizeof (diag_data), 109 diag_data.vrdclen = sizeof(diag_data);
98 }; 110 rc = diag210(&diag_data);
99 111 CIO_TRACE_EVENT(4, "diag210");
100 ccode = diag210 (&diag_data); 112 CIO_HEX_EVENT(4, &rc, sizeof(rc));
101 if ((ccode == 0) || (ccode == 2)) { 113 CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
102 ps->reserved = 0xff; 114 if (rc != 0 && rc != 2)
103 115 goto err_failed;
104 /* Special case for osa devices. */ 116 if (diag210_to_senseid(senseid, &diag_data))
105 if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { 117 goto err_unknown;
106 ps->cu_type = 0x3088; 118 return 0;
107 ps->cu_model = 0x60; 119
108 return 0; 120err_unknown:
109 } 121 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
110 ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, 122 dev_id->ssid, dev_id->devno);
111 diag_data.vrdcvtyp); 123 return -ENODEV;
112 if (ps->cu_type != 0xffff) 124err_failed:
113 return 0; 125 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
114 } 126 dev_id->ssid, dev_id->devno, rc);
115
116 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
117 "vdev class : %02X, vdev type : %04X \n ... "
118 "rdev class : %02X, rdev type : %04X, "
119 "rdev model: %02X\n",
120 devno, ccode,
121 diag_data.vrdcvcla, diag_data.vrdcvtyp,
122 diag_data.vrdcrccl, diag_data.vrdccrty,
123 diag_data.vrdccrmd);
124
125 return -ENODEV; 127 return -ENODEV;
126} 128}
127 129
128/* 130/*
129 * Start Sense ID helper function. 131 * Initialize SENSE ID data.
130 * Try to obtain the 'control unit'/'device type' information
131 * associated with the subchannel.
132 */ 132 */
133static int 133static void snsid_init(struct ccw_device *cdev)
134__ccw_device_sense_id_start(struct ccw_device *cdev)
135{
136 struct subchannel *sch;
137 struct ccw1 *ccw;
138 int ret;
139
140 sch = to_subchannel(cdev->dev.parent);
141 /* Setup sense channel program. */
142 ccw = cdev->private->iccws;
143 ccw->cmd_code = CCW_CMD_SENSE_ID;
144 ccw->cda = (__u32) __pa (&cdev->private->senseid);
145 ccw->count = sizeof (struct senseid);
146 ccw->flags = CCW_FLAG_SLI;
147
148 /* Reset device status. */
149 memset(&cdev->private->irb, 0, sizeof(struct irb));
150
151 /* Try on every path. */
152 ret = -ENODEV;
153 while (cdev->private->imask != 0) {
154 cdev->private->senseid.cu_type = 0xFFFF;
155 if ((sch->opm & cdev->private->imask) != 0 &&
156 cdev->private->iretry > 0) {
157 cdev->private->iretry--;
158 /* Reset internal retry indication. */
159 cdev->private->flags.intretry = 0;
160 ret = cio_start (sch, cdev->private->iccws,
161 cdev->private->imask);
162 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
163 if (ret != -EACCES)
164 return ret;
165 }
166 cdev->private->imask >>= 1;
167 cdev->private->iretry = 5;
168 }
169 return ret;
170}
171
172void
173ccw_device_sense_id_start(struct ccw_device *cdev)
174{ 134{
175 int ret; 135 cdev->private->flags.esid = 0;
176 136 memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
177 memset (&cdev->private->senseid, 0, sizeof (struct senseid)); 137 cdev->private->senseid.cu_type = 0xffff;
178 cdev->private->imask = 0x80;
179 cdev->private->iretry = 5;
180 ret = __ccw_device_sense_id_start(cdev);
181 if (ret && ret != -EBUSY)
182 ccw_device_sense_id_done(cdev, ret);
183} 138}
184 139
185/* 140/*
186 * Called from interrupt context to check if a valid answer 141 * Check for complete SENSE ID data.
187 * to Sense ID was received.
188 */ 142 */
189static int 143static int snsid_check(struct ccw_device *cdev, void *data)
190ccw_device_check_sense_id(struct ccw_device *cdev)
191{ 144{
192 struct subchannel *sch; 145 struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
193 struct irb *irb; 146 int len = sizeof(struct senseid) - scsw->count;
194 147
195 sch = to_subchannel(cdev->dev.parent); 148 /* Check for incomplete SENSE ID data. */
196 irb = &cdev->private->irb; 149 if (len < SENSE_ID_MIN_LEN)
197 150 goto out_restart;
198 /* Check the error cases. */ 151 if (cdev->private->senseid.cu_type == 0xffff)
199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 152 goto out_restart;
200 /* Retry Sense ID if requested. */ 153 /* Check for incompatible SENSE ID data. */
201 if (cdev->private->flags.intretry) { 154 if (cdev->private->senseid.reserved != 0xff)
202 cdev->private->flags.intretry = 0;
203 return -EAGAIN;
204 }
205 return -ETIME;
206 }
207 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
208 /*
209 * if the device doesn't support the SenseID
210 * command further retries wouldn't help ...
211 * NB: We don't check here for intervention required like we
212 * did before, because tape devices with no tape inserted
213 * may present this status *in conjunction with* the
214 * sense id information. So, for intervention required,
215 * we use the "whack it until it talks" strategy...
216 */
217 CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
218 "0.%x.%04x reports cmd reject\n",
219 cdev->private->dev_id.devno, sch->schid.ssid,
220 sch->schid.sch_no);
221 return -EOPNOTSUPP; 155 return -EOPNOTSUPP;
222 } 156 /* Check for extended-identification information. */
223 if (irb->esw.esw0.erw.cons) { 157 if (len > SENSE_ID_BASIC_LEN)
224 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " 158 cdev->private->flags.esid = 1;
225 "lpum %02X, cnt %02d, sns :" 159 return 0;
226 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
227 cdev->private->dev_id.ssid,
228 cdev->private->dev_id.devno,
229 irb->esw.esw0.sublog.lpum,
230 irb->esw.esw0.erw.scnt,
231 irb->ecw[0], irb->ecw[1],
232 irb->ecw[2], irb->ecw[3],
233 irb->ecw[4], irb->ecw[5],
234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN;
236 }
237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm;
239 160
240 lpm = to_io_private(sch)->orb.cmd.lpm; 161out_restart:
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 162 snsid_init(cdev);
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is "
244 "'not operational'\n", lpm,
245 cdev->private->dev_id.devno,
246 sch->schid.ssid, sch->schid.sch_no);
247 return -EACCES;
248 }
249
250 /* Did we get a proper answer ? */
251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1;
255 return 0; /* Success */
256 }
257
258 /* Hmm, whatever happened, try again. */
259 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no,
263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 163 return -EAGAIN;
265} 164}
266 165
267/* 166/*
268 * Got interrupt for Sense ID. 167 * Process SENSE ID request result.
269 */ 168 */
270void 169static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
271ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
272{ 170{
273 struct subchannel *sch; 171 struct ccw_dev_id *id = &cdev->private->dev_id;
274 struct irb *irb; 172 struct senseid *senseid = &cdev->private->senseid;
275 int ret; 173 int vm = 0;
276 174
277 sch = to_subchannel(cdev->dev.parent); 175 if (rc && MACHINE_IS_VM) {
278 irb = (struct irb *) __LC_IRB; 176 /* Try diag 0x210 fallback on z/VM. */
279 /* Retry sense id, if needed. */ 177 snsid_init(cdev);
280 if (irb->scsw.cmd.stctl == 178 if (diag210_get_dev_info(cdev) == 0) {
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 179 rc = 0;
282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { 180 vm = 1;
283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret);
286 } 181 }
287 return;
288 } 182 }
289 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 183 CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
290 return; 184 "%04x/%02x%s\n", id->ssid, id->devno, rc,
291 ret = ccw_device_check_sense_id(cdev); 185 senseid->cu_type, senseid->cu_model, senseid->dev_type,
292 memset(&cdev->private->irb, 0, sizeof(struct irb)); 186 senseid->dev_model, vm ? " (diag210)" : "");
293 switch (ret) { 187 ccw_device_sense_id_done(cdev, rc);
294 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */ 188}
295 case 0: /* Sense id succeeded. */
296 case -ETIME: /* Sense id stopped by timeout. */
297 ccw_device_sense_id_done(cdev, ret);
298 break;
299 case -EACCES: /* channel is not operational. */
300 sch->lpm &= ~cdev->private->imask;
301 cdev->private->imask >>= 1;
302 cdev->private->iretry = 5;
303 /* fall through. */
304 case -EAGAIN: /* try again. */
305 ret = __ccw_device_sense_id_start(cdev);
306 if (ret == 0 || ret == -EBUSY)
307 break;
308 /* fall through. */
309 default: /* Sense ID failed. Try asking VM. */
310 if (MACHINE_IS_VM)
311 ret = diag_get_dev_info(cdev->private->dev_id.devno,
312 &cdev->private->senseid);
313 else
314 /*
315 * If we couldn't identify the device type we
316 * consider the device "not operational".
317 */
318 ret = -ENODEV;
319 189
320 ccw_device_sense_id_done(cdev, ret); 190/**
321 break; 191 * ccw_device_sense_id_start - perform SENSE ID
322 } 192 * @cdev: ccw device
193 *
194 * Execute a SENSE ID channel program on @cdev to update its sense id
195 * information. When finished, call ccw_device_sense_id_done with a
196 * return code specifying the result.
197 */
198void ccw_device_sense_id_start(struct ccw_device *cdev)
199{
200 struct subchannel *sch = to_subchannel(cdev->dev.parent);
201 struct ccw_request *req = &cdev->private->req;
202 struct ccw1 *cp = cdev->private->iccws;
203
204 CIO_TRACE_EVENT(4, "snsid");
205 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
206 /* Data setup. */
207 snsid_init(cdev);
208 /* Channel program setup. */
209 cp->cmd_code = CCW_CMD_SENSE_ID;
210 cp->cda = (u32) (addr_t) &cdev->private->senseid;
211 cp->count = sizeof(struct senseid);
212 cp->flags = CCW_FLAG_SLI;
213 /* Request setup. */
214 memset(req, 0, sizeof(*req));
215 req->cp = cp;
216 req->timeout = SENSE_ID_TIMEOUT;
217 req->maxretries = SENSE_ID_RETRIES;
218 req->lpm = sch->schib.pmcw.pam & sch->opm;
219 req->check = snsid_check;
220 req->callback = snsid_callback;
221 ccw_request_start(cdev);
323} 222}
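The rewritten device_id.c is one instance of the pattern shared by all files converted in this series: build a channel program, fill in cdev->private->req, and let the new ccwreq.c engine drive retries, timeouts and path selection. A rough template under that assumption (my_start() and my_callback() are invented names; the fields are those of struct ccw_request from io_sch.h):

static void my_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* rc is 0 on success or a negative errno (-ETIME, -EACCES, ...)
	 * as determined by ccwreq.c and the optional check function. */
}

static void my_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	/* Channel program: one command, suppress-length-indication set. */
	cp->cmd_code = CCW_CMD_SENSE_ID;	/* stand-in for any command */
	cp->cda = (u32) (addr_t) &cdev->private->senseid;
	cp->count = sizeof(struct senseid);
	cp->flags = CCW_FLAG_SLI;
	/* Request: retry budget, per-try timeout, starting path mask. */
	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->timeout = 10 * HZ;
	req->maxretries = 256;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->callback = my_callback;
	ccw_request_start(cdev);
}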
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a290..6da84543dfe9 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/completion.h>
14 15
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include <asm/idals.h> 17#include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
46 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0; 47 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
47 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0; 48 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
48 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0; 49 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
50 cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
49 return 0; 51 return 0;
50} 52}
51 53
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
74 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0; 76 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
75 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0; 77 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
76 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0; 78 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
79 cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
77 return 0; 80 return 0;
78} 81}
79 82
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
90 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0; 93 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
91 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0; 94 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
92 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0; 95 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
96 cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
93} 97}
94 98
95/** 99/**
100 * ccw_device_is_pathgroup - determine if paths to this device are grouped
101 * @cdev: ccw device
102 *
103 * Return non-zero if there is a path group, zero otherwise.
104 */
105int ccw_device_is_pathgroup(struct ccw_device *cdev)
106{
107 return cdev->private->flags.pgroup;
108}
109EXPORT_SYMBOL(ccw_device_is_pathgroup);
110
111/**
112 * ccw_device_is_multipath - determine if device is operating in multipath mode
113 * @cdev: ccw device
114 *
115 * Return non-zero if device is operating in multipath mode, zero otherwise.
116 */
117int ccw_device_is_multipath(struct ccw_device *cdev)
118{
119 return cdev->private->flags.mpath;
120}
121EXPORT_SYMBOL(ccw_device_is_multipath);
122
123/**
96 * ccw_device_clear() - terminate I/O request processing 124 * ccw_device_clear() - terminate I/O request processing
97 * @cdev: target ccw device 125 * @cdev: target ccw device
98 * @intparm: interruption parameter; value is only used if no I/O is 126 * @intparm: interruption parameter; value is only used if no I/O is
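The new CCWDEV_DO_MULTIPATH option and the two query helpers above give drivers explicit control over, and visibility of, the multipathing mode negotiated during path verification. A hedged usage sketch (my_probe() is hypothetical; the option must be set before the device is brought online):

static int my_probe(struct ccw_device *cdev)
{
	int rc;

	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
					  CCWDEV_DO_MULTIPATH);
	if (rc)
		return rc;
	rc = ccw_device_set_online(cdev);
	if (rc)
		return rc;
	/* The flags now reflect what path verification actually
	 * established, which may be less than what was requested. */
	if (!ccw_device_is_multipath(cdev))
		dev_info(&cdev->dev, "operating in single-path mode\n");
	return 0;
}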
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
167 return -EINVAL; 195 return -EINVAL;
168 if (cdev->private->state == DEV_STATE_NOT_OPER) 196 if (cdev->private->state == DEV_STATE_NOT_OPER)
169 return -ENODEV; 197 return -ENODEV;
170 if (cdev->private->state == DEV_STATE_VERIFY || 198 if (cdev->private->state == DEV_STATE_VERIFY) {
171 cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
172 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
173 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
174 cdev->private->flags.fake_irb = 1; 201 cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
478 return sch->lpm; 505 return sch->lpm;
479} 506}
480 507
481/* 508struct stlck_data {
482 * Try to break the lock on a boxed device. 509 struct completion done;
483 */ 510 int rc;
484int 511};
485ccw_device_stlck(struct ccw_device *cdev)
486{
487 void *buf, *buf2;
488 unsigned long flags;
489 struct subchannel *sch;
490 int ret;
491 512
492 if (!cdev) 513void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
493 return -ENODEV; 514{
515 struct stlck_data *sdata = data;
494 516
495 if (cdev->drv && !cdev->private->options.force) 517 sdata->rc = rc;
496 return -EINVAL; 518 complete(&sdata->done);
519}
497 520
498 sch = to_subchannel(cdev->dev.parent); 521/*
499 522 * Perform unconditional reserve + release.
500 CIO_TRACE_EVENT(2, "stl lock"); 523 */
501 CIO_TRACE_EVENT(2, dev_name(&cdev->dev)); 524int ccw_device_stlck(struct ccw_device *cdev)
525{
526 struct subchannel *sch = to_subchannel(cdev->dev.parent);
527 struct stlck_data data;
528 u8 *buffer;
529 int rc;
502 530
503 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 531 /* Check if steal lock operation is valid for this device. */
504 if (!buf) 532 if (cdev->drv) {
505 return -ENOMEM; 533 if (!cdev->private->options.force)
506 buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 534 return -EINVAL;
507 if (!buf2) {
508 kfree(buf);
509 return -ENOMEM;
510 } 535 }
511 spin_lock_irqsave(sch->lock, flags); 536 buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
512 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); 537 if (!buffer)
513 if (ret) 538 return -ENOMEM;
514 goto out_unlock; 539 init_completion(&data.done);
515 /* 540 data.rc = -EIO;
516 * Setup ccw. We chain an unconditional reserve and a release so we 541 spin_lock_irq(sch->lock);
517 * only break the lock. 542 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
518 */ 543 if (rc)
519 cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
520 cdev->private->iccws[0].cda = (__u32) __pa(buf);
521 cdev->private->iccws[0].count = 32;
522 cdev->private->iccws[0].flags = CCW_FLAG_CC;
523 cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
524 cdev->private->iccws[1].cda = (__u32) __pa(buf2);
525 cdev->private->iccws[1].count = 32;
526 cdev->private->iccws[1].flags = 0;
527 ret = cio_start(sch, cdev->private->iccws, 0);
528 if (ret) {
529 cio_disable_subchannel(sch); //FIXME: return code?
530 goto out_unlock; 544 goto out_unlock;
545 /* Perform operation. */
546 cdev->private->state = DEV_STATE_STEAL_LOCK;
547 ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
548 spin_unlock_irq(sch->lock);
549 /* Wait for operation to finish. */
550 if (wait_for_completion_interruptible(&data.done)) {
551 /* Got a signal. */
552 spin_lock_irq(sch->lock);
553 ccw_request_cancel(cdev);
554 spin_unlock_irq(sch->lock);
555 wait_for_completion(&data.done);
531 } 556 }
532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 557 rc = data.rc;
533 spin_unlock_irqrestore(sch->lock, flags); 558 /* Check results. */
534 wait_event(cdev->private->wait_q, 559 spin_lock_irq(sch->lock);
535 cdev->private->irb.scsw.cmd.actl == 0); 560 cio_disable_subchannel(sch);
536 spin_lock_irqsave(sch->lock, flags); 561 cdev->private->state = DEV_STATE_BOXED;
537 cio_disable_subchannel(sch); //FIXME: return code?
538 if ((cdev->private->irb.scsw.cmd.dstat !=
539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
540 (cdev->private->irb.scsw.cmd.cstat != 0))
541 ret = -EIO;
542 /* Clear irb. */
543 memset(&cdev->private->irb, 0, sizeof(struct irb));
544out_unlock: 562out_unlock:
545 kfree(buf); 563 spin_unlock_irq(sch->lock);
546 kfree(buf2); 564 kfree(buffer);
547 spin_unlock_irqrestore(sch->lock, flags); 565
548 return ret; 566 return rc;
549} 567}
550 568
551void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 569void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
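The reworked ccw_device_stlck() above replaces the old wait_event() on IRB state with a completion-based pattern that stays safe across signals: start the request under the subchannel lock, sleep interruptibly, and on a signal cancel the request and wait again so the callback can never fire against a stale stack frame. The shape of that pattern, reduced to its essentials (start_my_request() stands in for ccw_device_stlck_start(); its completion path must end in ccw_device_stlck_done(), which completes data.done):

static int my_sync_op(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;

	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	start_my_request(cdev, &data);		/* hypothetical starter */
	spin_unlock_irq(sch->lock);
	if (wait_for_completion_interruptible(&data.done)) {
		/* Interrupted by a signal: force the request to
		 * terminate, then wait until its callback has really
		 * run, so 'data' stays valid for the callback. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	return data.rc;
}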
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b3..aad188e43b4f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,561 @@
1/* 1/*
2 * drivers/s390/cio/device_pgid.c 2 * CCW device PGID and path verification I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Path Group ID functions.
10 */ 8 */
11 9
12#include <linux/module.h> 10#include <linux/kernel.h>
13#include <linux/init.h> 11#include <linux/string.h>
14 12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/bitops.h>
15#include <asm/ccwdev.h> 15#include <asm/ccwdev.h>
16#include <asm/cio.h> 16#include <asm/cio.h>
17#include <asm/delay.h>
18#include <asm/lowcore.h>
19 17
20#include "cio.h" 18#include "cio.h"
21#include "cio_debug.h" 19#include "cio_debug.h"
22#include "css.h"
23#include "device.h" 20#include "device.h"
24#include "ioasm.h"
25#include "io_sch.h" 21#include "io_sch.h"
26 22
23#define PGID_RETRIES 256
24#define PGID_TIMEOUT (10 * HZ)
25
27/* 26/*
28 * Helper function called from interrupt context to decide whether an 27 * Process path verification data and report result.
29 * operation should be tried again.
30 */ 28 */
31static int __ccw_device_should_retry(union scsw *scsw) 29static void verify_done(struct ccw_device *cdev, int rc)
32{ 30{
33 /* CC is only valid if start function bit is set. */ 31 struct subchannel *sch = to_subchannel(cdev->dev.parent);
34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) 32 struct ccw_dev_id *id = &cdev->private->dev_id;
35 return 1; 33 int mpath = cdev->private->flags.mpath;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 34 int pgroup = cdev->private->flags.pgroup;
37 if (!scsw->cmd.actl) 35
38 return 1; 36 if (rc)
39 return 0; 37 goto out;
38 /* Ensure consistent multipathing state at device and channel. */
39 if (sch->config.mp != mpath) {
40 sch->config.mp = mpath;
41 rc = cio_commit_config(sch);
42 }
43out:
44 CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
45 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
46 sch->vpm);
47 ccw_device_verify_done(cdev, rc);
40} 48}
41 49
42/* 50/*
43 * Start Sense Path Group ID helper function. Used in ccw_device_recog 51 * Create channel program to perform a NOOP.
44 * and ccw_device_sense_pgid.
45 */ 52 */
46static int 53static void nop_build_cp(struct ccw_device *cdev)
47__ccw_device_sense_pgid_start(struct ccw_device *cdev)
48{ 54{
49 struct subchannel *sch; 55 struct ccw_request *req = &cdev->private->req;
50 struct ccw1 *ccw; 56 struct ccw1 *cp = cdev->private->iccws;
51 int ret; 57
52 int i; 58 cp->cmd_code = CCW_CMD_NOOP;
53 59 cp->cda = 0;
54 sch = to_subchannel(cdev->dev.parent); 60 cp->count = 0;
55 /* Return if we already checked on all paths. */ 61 cp->flags = CCW_FLAG_SLI;
56 if (cdev->private->imask == 0) 62 req->cp = cp;
57 return (sch->lpm == 0) ? -ENODEV : -EACCES;
58 i = 8 - ffs(cdev->private->imask);
59
60 /* Setup sense path group id channel program. */
61 ccw = cdev->private->iccws;
62 ccw->cmd_code = CCW_CMD_SENSE_PGID;
63 ccw->count = sizeof (struct pgid);
64 ccw->flags = CCW_FLAG_SLI;
65
66 /* Reset device status. */
67 memset(&cdev->private->irb, 0, sizeof(struct irb));
68 /* Try on every path. */
69 ret = -ENODEV;
70 while (cdev->private->imask != 0) {
71 /* Try every path multiple times. */
72 ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
73 if (cdev->private->iretry > 0) {
74 cdev->private->iretry--;
75 /* Reset internal retry indication. */
76 cdev->private->flags.intretry = 0;
77 ret = cio_start (sch, cdev->private->iccws,
78 cdev->private->imask);
79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
80 if (ret != -EACCES)
81 return ret;
82 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
83 "0.%x.%04x, lpm %02X, became 'not "
84 "operational'\n",
85 cdev->private->dev_id.devno,
86 sch->schid.ssid,
87 sch->schid.sch_no, cdev->private->imask);
88
89 }
90 cdev->private->imask >>= 1;
91 cdev->private->iretry = 5;
92 i++;
93 }
94
95 return ret;
96} 63}
97 64
98void 65/*
99ccw_device_sense_pgid_start(struct ccw_device *cdev) 66 * Perform NOOP on a single path.
67 */
68static void nop_do(struct ccw_device *cdev)
100{ 69{
101 int ret; 70 struct subchannel *sch = to_subchannel(cdev->dev.parent);
102 71 struct ccw_request *req = &cdev->private->req;
103 /* Set a timeout of 60s */ 72
104 ccw_device_set_timeout(cdev, 60*HZ); 73 /* Adjust lpm. */
105 74 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
106 cdev->private->state = DEV_STATE_SENSE_PGID; 75 if (!req->lpm)
107 cdev->private->imask = 0x80; 76 goto out_nopath;
108 cdev->private->iretry = 5; 77 nop_build_cp(cdev);
109 memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid)); 78 ccw_request_start(cdev);
110 ret = __ccw_device_sense_pgid_start(cdev); 79 return;
111 if (ret && ret != -EBUSY) 80
112 ccw_device_sense_pgid_done(cdev, ret); 81out_nopath:
82 verify_done(cdev, sch->vpm ? 0 : -EACCES);
113} 83}
114 84
115/* 85/*
116 * Called from interrupt context to check if a valid answer 86 * Adjust NOOP I/O status.
117 * to Sense Path Group ID was received.
118 */ 87 */
119static int 88static enum io_status nop_filter(struct ccw_device *cdev, void *data,
120__ccw_device_check_sense_pgid(struct ccw_device *cdev) 89 struct irb *irb, enum io_status status)
121{ 90{
122 struct subchannel *sch; 91 /* Only subchannel status might indicate a path error. */
123 struct irb *irb; 92 if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
124 int i; 93 return IO_DONE;
125 94 return status;
126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb;
128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0;
132 return -EAGAIN;
133 }
134 return -ETIME;
135 }
136 if (irb->esw.esw0.erw.cons &&
137 (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
138 /*
139 * If the device doesn't support the Sense Path Group ID
140 * command further retries wouldn't help ...
141 */
142 return -EOPNOTSUPP;
143 }
144 if (irb->esw.esw0.erw.cons) {
145 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
146 "lpum %02X, cnt %02d, sns : "
147 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
148 cdev->private->dev_id.ssid,
149 cdev->private->dev_id.devno,
150 irb->esw.esw0.sublog.lpum,
151 irb->esw.esw0.erw.scnt,
152 irb->ecw[0], irb->ecw[1],
153 irb->ecw[2], irb->ecw[3],
154 irb->ecw[4], irb->ecw[5],
155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN;
157 }
158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm;
160
161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid,
165 sch->schid.sch_no, lpm);
166 return -EACCES;
167 }
168 i = 8 - ffs(cdev->private->imask);
169 if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
170 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
171 "is reserved by someone else\n",
172 cdev->private->dev_id.devno, sch->schid.ssid,
173 sch->schid.sch_no);
174 return -EUSERS;
175 }
176 return 0;
177} 95}
178 96
179/* 97/*
180 * Got interrupt for Sense Path Group ID. 98 * Process NOOP request result for a single path.
181 */ 99 */
182void 100static void nop_callback(struct ccw_device *cdev, void *data, int rc)
183ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
184{ 101{
185 struct subchannel *sch; 102 struct subchannel *sch = to_subchannel(cdev->dev.parent);
186 struct irb *irb; 103 struct ccw_request *req = &cdev->private->req;
187 int ret; 104
188 105 if (rc == 0)
189 irb = (struct irb *) __LC_IRB; 106 sch->vpm |= req->lpm;
190 107 else if (rc != -EACCES)
191 if (irb->scsw.cmd.stctl == 108 goto err;
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 109 req->lpm >>= 1;
193 if (__ccw_device_should_retry(&irb->scsw)) { 110 nop_do(cdev);
194 ret = __ccw_device_sense_pgid_start(cdev); 111 return;
195 if (ret && ret != -EBUSY) 112
196 ccw_device_sense_pgid_done(cdev, ret); 113err:
197 } 114 verify_done(cdev, rc);
198 return;
199 }
200 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
201 return;
202 sch = to_subchannel(cdev->dev.parent);
203 ret = __ccw_device_check_sense_pgid(cdev);
204 memset(&cdev->private->irb, 0, sizeof(struct irb));
205 switch (ret) {
206 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
207 case -EOPNOTSUPP: /* Sense Path Group ID not supported */
208 ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
209 break;
210 case -ETIME: /* Sense path group id stopped by timeout. */
211 ccw_device_sense_pgid_done(cdev, -ETIME);
212 break;
213 case -EACCES: /* channel is not operational. */
214 sch->lpm &= ~cdev->private->imask;
215 /* Fall through. */
216 case 0: /* Sense Path Group ID successful. */
217 cdev->private->imask >>= 1;
218 cdev->private->iretry = 5;
219 /* Fall through. */
220 case -EAGAIN: /* Try again. */
221 ret = __ccw_device_sense_pgid_start(cdev);
222 if (ret != 0 && ret != -EBUSY)
223 ccw_device_sense_pgid_done(cdev, ret);
224 break;
225 case -EUSERS: /* device is reserved for someone else. */
226 ccw_device_sense_pgid_done(cdev, -EUSERS);
227 break;
228 }
229} 115}
230 116
231/* 117/*
232 * Path Group ID helper function. 118 * Create channel program to perform SET PGID on a single path.
233 */ 119 */
234static int 120static void spid_build_cp(struct ccw_device *cdev, u8 fn)
235__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
236{ 121{
237 struct subchannel *sch; 122 struct ccw_request *req = &cdev->private->req;
238 struct ccw1 *ccw; 123 struct ccw1 *cp = cdev->private->iccws;
239 int ret; 124 int i = 8 - ffs(req->lpm);
240 125 struct pgid *pgid = &cdev->private->pgid[i];
241 sch = to_subchannel(cdev->dev.parent); 126
242 127 pgid->inf.fc = fn;
243 /* Setup sense path group id channel program. */ 128 cp->cmd_code = CCW_CMD_SET_PGID;
244 cdev->private->pgid[0].inf.fc = func; 129 cp->cda = (u32) (addr_t) pgid;
245 ccw = cdev->private->iccws; 130 cp->count = sizeof(*pgid);
246 if (cdev->private->flags.pgid_single) 131 cp->flags = CCW_FLAG_SLI;
247 cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH; 132 req->cp = cp;
248 else
249 cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
250 ccw->cmd_code = CCW_CMD_SET_PGID;
251 ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
252 ccw->count = sizeof (struct pgid);
253 ccw->flags = CCW_FLAG_SLI;
254
255 /* Reset device status. */
256 memset(&cdev->private->irb, 0, sizeof(struct irb));
257
258 /* Try multiple times. */
259 ret = -EACCES;
260 if (cdev->private->iretry > 0) {
261 cdev->private->iretry--;
262 /* Reset internal retry indication. */
263 cdev->private->flags.intretry = 0;
264 ret = cio_start (sch, cdev->private->iccws,
265 cdev->private->imask);
266 /* We expect an interrupt in case of success or busy
267 * indication. */
268 if ((ret == 0) || (ret == -EBUSY))
269 return ret;
270 }
271 /* PGID command failed on this path. */
272 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
273 "0.%x.%04x, lpm %02X, became 'not operational'\n",
274 cdev->private->dev_id.devno, sch->schid.ssid,
275 sch->schid.sch_no, cdev->private->imask);
276 return ret;
277} 133}
278 134
279/* 135/*
280 * Helper function to send a nop ccw down a path. 136 * Perform establish/resign SET PGID on a single path.
281 */ 137 */
282static int __ccw_device_do_nop(struct ccw_device *cdev) 138static void spid_do(struct ccw_device *cdev)
283{ 139{
284 struct subchannel *sch; 140 struct subchannel *sch = to_subchannel(cdev->dev.parent);
285 struct ccw1 *ccw; 141 struct ccw_request *req = &cdev->private->req;
286 int ret; 142 u8 fn;
287 143
288 sch = to_subchannel(cdev->dev.parent); 144 /* Use next available path that is not already in correct state. */
289 145 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
290 /* Setup nop channel program. */ 146 if (!req->lpm)
291 ccw = cdev->private->iccws; 147 goto out_nopath;
292 ccw->cmd_code = CCW_CMD_NOOP; 148 /* Channel program setup. */
293 ccw->cda = 0; 149 if (req->lpm & sch->opm)
294 ccw->count = 0; 150 fn = SPID_FUNC_ESTABLISH;
295 ccw->flags = CCW_FLAG_SLI; 151 else
296 152 fn = SPID_FUNC_RESIGN;
297 /* Reset device status. */ 153 if (cdev->private->flags.mpath)
298 memset(&cdev->private->irb, 0, sizeof(struct irb)); 154 fn |= SPID_FUNC_MULTI_PATH;
299 155 spid_build_cp(cdev, fn);
300 /* Try multiple times. */ 156 ccw_request_start(cdev);
301 ret = -EACCES; 157 return;
302 if (cdev->private->iretry > 0) { 158
303 cdev->private->iretry--; 159out_nopath:
304 /* Reset internal retry indication. */ 160 verify_done(cdev, sch->vpm ? 0 : -EACCES);
305 cdev->private->flags.intretry = 0;
306 ret = cio_start (sch, cdev->private->iccws,
307 cdev->private->imask);
308 /* We expect an interrupt in case of success or busy
309 * indication. */
310 if ((ret == 0) || (ret == -EBUSY))
311 return ret;
312 }
313 /* nop command failed on this path. */
314 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
315 "0.%x.%04x, lpm %02X, became 'not operational'\n",
316 cdev->private->dev_id.devno, sch->schid.ssid,
317 sch->schid.sch_no, cdev->private->imask);
318 return ret;
319} 161}
320 162
163static void verify_start(struct ccw_device *cdev);
321 164
322/* 165/*
323 * Called from interrupt context to check if a valid answer 166 * Process SET PGID request result for a single path.
324 * to Set Path Group ID was received.
325 */ 167 */
326static int 168static void spid_callback(struct ccw_device *cdev, void *data, int rc)
327__ccw_device_check_pgid(struct ccw_device *cdev)
328{ 169{
329 struct subchannel *sch; 170 struct subchannel *sch = to_subchannel(cdev->dev.parent);
330 struct irb *irb; 171 struct ccw_request *req = &cdev->private->req;
331 172
332 sch = to_subchannel(cdev->dev.parent); 173 switch (rc) {
333 irb = &cdev->private->irb; 174 case 0:
334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 175 sch->vpm |= req->lpm & sch->opm;
335 /* Retry Set PGID if requested. */ 176 break;
336 if (cdev->private->flags.intretry) { 177 case -EACCES:
337 cdev->private->flags.intretry = 0; 178 break;
338 return -EAGAIN; 179 case -EOPNOTSUPP:
180 if (cdev->private->flags.mpath) {
181 /* Try without multipathing. */
182 cdev->private->flags.mpath = 0;
183 goto out_restart;
339 } 184 }
340 return -ETIME; 185 /* Try without pathgrouping. */
186 cdev->private->flags.pgroup = 0;
187 goto out_restart;
188 default:
189 goto err;
341 } 190 }
342 if (irb->esw.esw0.erw.cons) { 191 req->lpm >>= 1;
343 if (irb->ecw[0] & SNS0_CMD_REJECT) 192 spid_do(cdev);
344 return -EOPNOTSUPP; 193 return;
345 /* Hmm, whatever happened, try again. */ 194
346 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " 195out_restart:
347 "cnt %02d, " 196 verify_start(cdev);
348 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 197 return;
349 cdev->private->dev_id.ssid, 198err:
350 cdev->private->dev_id.devno, 199 verify_done(cdev, rc);
351 irb->esw.esw0.erw.scnt, 200}
352 irb->ecw[0], irb->ecw[1], 201
353 irb->ecw[2], irb->ecw[3], 202static void spid_start(struct ccw_device *cdev)
354 irb->ecw[4], irb->ecw[5], 203{
355 irb->ecw[6], irb->ecw[7]); 204 struct ccw_request *req = &cdev->private->req;
356 return -EAGAIN; 205
357 } 206 /* Initialize request data. */
358 if (irb->scsw.cmd.cc == 3) { 207 memset(req, 0, sizeof(*req));
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 208 req->timeout = PGID_TIMEOUT;
360 " lpm %02X, became 'not operational'\n", 209 req->maxretries = PGID_RETRIES;
361 cdev->private->dev_id.devno, sch->schid.ssid, 210 req->lpm = 0x80;
362 sch->schid.sch_no, cdev->private->imask); 211 req->callback = spid_callback;
363 return -EACCES; 212 spid_do(cdev);
364 } 213}
365 return 0; 214
215static int pgid_cmp(struct pgid *p1, struct pgid *p2)
216{
217 return memcmp((char *) p1 + 1, (char *) p2 + 1,
218 sizeof(struct pgid) - 1);
366} 219}
367 220
368/* 221/*
369 * Called from interrupt context to check the path status after a nop has 222 * Determine pathgroup state from PGID data.
370 * been sent.
371 */ 223 */
372static int __ccw_device_check_nop(struct ccw_device *cdev) 224static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
225 int *mismatch, int *reserved, int *reset)
373{ 226{
374 struct subchannel *sch; 227 struct pgid *pgid = &cdev->private->pgid[0];
375 struct irb *irb; 228 struct pgid *first = NULL;
376 229 int lpm;
377 sch = to_subchannel(cdev->dev.parent); 230 int i;
378 irb = &cdev->private->irb; 231
379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 232 *mismatch = 0;
380 /* Retry NOP if requested. */ 233 *reserved = 0;
381 if (cdev->private->flags.intretry) { 234 *reset = 0;
382 cdev->private->flags.intretry = 0; 235 for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
383 return -EAGAIN; 236 if ((cdev->private->pgid_valid_mask & lpm) == 0)
237 continue;
238 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
239 *reserved = 1;
240 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
241 /* A PGID was reset. */
242 *reset = 1;
243 continue;
384 } 244 }
385 return -ETIME; 245 if (!first) {
386 } 246 first = pgid;
387 if (irb->scsw.cmd.cc == 3) { 247 continue;
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 248 }
389 " lpm %02X, became 'not operational'\n", 249 if (pgid_cmp(pgid, first) != 0)
390 cdev->private->dev_id.devno, sch->schid.ssid, 250 *mismatch = 1;
391 sch->schid.sch_no, cdev->private->imask);
392 return -EACCES;
393 } 251 }
394 return 0; 252 if (!first)
253 first = &channel_subsystems[0]->global_pgid;
254 *p = first;
395} 255}
396 256
397static void 257static u8 pgid_to_vpm(struct ccw_device *cdev)
398__ccw_device_verify_start(struct ccw_device *cdev)
399{ 258{
400 struct subchannel *sch; 259 struct subchannel *sch = to_subchannel(cdev->dev.parent);
401 __u8 func; 260 struct pgid *pgid;
402 int ret; 261 int i;
403 262 int lpm;
404 sch = to_subchannel(cdev->dev.parent); 263 u8 vpm = 0;
405 /* Repeat for all paths. */ 264
406 for (; cdev->private->imask; cdev->private->imask >>= 1, 265 /* Set VPM bits for paths which are already in the target state. */
407 cdev->private->iretry = 5) { 266 for (i = 0; i < 8; i++) {
408 if ((cdev->private->imask & sch->schib.pmcw.pam) == 0) 267 lpm = 0x80 >> i;
409 /* Path not available, try next. */ 268 if ((cdev->private->pgid_valid_mask & lpm) == 0)
410 continue; 269 continue;
411 if (cdev->private->options.pgroup) { 270 pgid = &cdev->private->pgid[i];
412 if (sch->opm & cdev->private->imask) 271 if (sch->opm & lpm) {
413 func = SPID_FUNC_ESTABLISH; 272 if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
414 else 273 continue;
415 func = SPID_FUNC_RESIGN; 274 } else {
416 ret = __ccw_device_do_pgid(cdev, func); 275 if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
417 } else 276 continue;
418 ret = __ccw_device_do_nop(cdev); 277 }
419 /* We expect an interrupt in case of success or busy 278 if (cdev->private->flags.mpath) {
420 * indication. */ 279 if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
421 if (ret == 0 || ret == -EBUSY) 280 continue;
422 return; 281 } else {
423 /* Permanent path failure, try next. */ 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
283 continue;
284 }
285 vpm |= lpm;
424 } 286 }
425 /* Done with all paths. */ 287
426 ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV); 288 return vpm;
427} 289}
428 290
429/* 291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
430 * Got interrupt for Set Path Group ID.
431 */
432void
433ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
434{ 292{
435 struct subchannel *sch; 293 int i;
436 struct irb *irb;
437 int ret;
438 294
439 irb = (struct irb *) __LC_IRB; 295 for (i = 0; i < 8; i++)
296 memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
297}
440 298
441 if (irb->scsw.cmd.stctl == 299/*
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 300 * Process SENSE PGID data and report result.
443 if (__ccw_device_should_retry(&irb->scsw)) 301 */
444 __ccw_device_verify_start(cdev); 302static void snid_done(struct ccw_device *cdev, int rc)
445 return; 303{
304 struct ccw_dev_id *id = &cdev->private->dev_id;
305 struct subchannel *sch = to_subchannel(cdev->dev.parent);
306 struct pgid *pgid;
307 int mismatch = 0;
308 int reserved = 0;
309 int reset = 0;
310
311 if (rc)
312 goto out;
313 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
314 if (reserved)
315 rc = -EUSERS;
316 else if (mismatch)
317 rc = -EOPNOTSUPP;
318 else {
319 sch->vpm = pgid_to_vpm(cdev);
320 pgid_fill(cdev, pgid);
446 } 321 }
447 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 322out:
448 return; 323 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
449 sch = to_subchannel(cdev->dev.parent); 324 "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
450 if (cdev->private->options.pgroup) 325 cdev->private->pgid_valid_mask, sch->vpm, mismatch,
451 ret = __ccw_device_check_pgid(cdev); 326 reserved, reset);
452 else 327 switch (rc) {
453 ret = __ccw_device_check_nop(cdev);
454 memset(&cdev->private->irb, 0, sizeof(struct irb));
455
456 switch (ret) {
457 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
458 case 0: 328 case 0:
459 /* Path verification ccw finished successfully, update lpm. */ 329 /* Anything left to do? */
460 sch->vpm |= sch->opm & cdev->private->imask; 330 if (sch->vpm == sch->schib.pmcw.pam) {
461 /* Go on with next path. */ 331 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
462 cdev->private->imask >>= 1; 332 return;
463 cdev->private->iretry = 5; 333 }
464 __ccw_device_verify_start(cdev); 334 /* Perform path-grouping. */
335 spid_start(cdev);
465 break; 336 break;
466 case -EOPNOTSUPP: 337 case -EOPNOTSUPP:
467 /* 338 /* Path-grouping not supported. */
468 * One of those strange devices which claim to be able 339 cdev->private->flags.pgroup = 0;
469 * to do multipathing but not for Set Path Group ID. 340 cdev->private->flags.mpath = 0;
470 */ 341 verify_start(cdev);
471 if (cdev->private->flags.pgid_single)
472 cdev->private->options.pgroup = 0;
473 else
474 cdev->private->flags.pgid_single = 1;
475 /* Retry */
476 sch->vpm = 0;
477 cdev->private->imask = 0x80;
478 cdev->private->iretry = 5;
479 /* fall through. */
-	case -EAGAIN:	/* Try again. */
-		__ccw_device_verify_start(cdev);
-		break;
-	case -ETIME:	/* Set path group id stopped by timeout. */
-		ccw_device_verify_done(cdev, -ETIME);
-		break;
-	case -EACCES:	/* channel is not operational. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		__ccw_device_verify_start(cdev);
 		break;
+	default:
+		verify_done(cdev, rc);
 	}
 }
 
-void
-ccw_device_verify_start(struct ccw_device *cdev)
+/*
+ * Create channel program to perform a SENSE PGID on a single path.
+ */
+static void snid_build_cp(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+	int i = 8 - ffs(req->lpm);
+
+	/* Channel program setup. */
+	cp->cmd_code = CCW_CMD_SENSE_PGID;
+	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
+	cp->count = sizeof(struct pgid);
+	cp->flags = CCW_FLAG_SLI;
+	req->cp = cp;
+}
+
+/*
+ * Perform SENSE PGID on a single path.
+ */
+static void snid_do(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Adjust lpm if paths are not set in pam. */
+	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
+	if (!req->lpm)
+		goto out_nopath;
+	snid_build_cp(cdev);
+	ccw_request_start(cdev);
+	return;
+
+out_nopath:
+	snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
+}
 
-	cdev->private->flags.pgid_single = 0;
-	cdev->private->imask = 0x80;
-	cdev->private->iretry = 5;
+/*
+ * Process SENSE PGID request result for single path.
+ */
+static void snid_callback(struct ccw_device *cdev, void *data, int rc)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	if (rc == 0)
+		cdev->private->pgid_valid_mask |= req->lpm;
+	else if (rc != -EACCES)
+		goto err;
+	req->lpm >>= 1;
+	snid_do(cdev);
+	return;
+
+err:
+	snid_done(cdev, rc);
+}
 
-	/* Start with empty vpm. */
-	sch->vpm = 0;
+/*
+ * Perform path verification.
+ */
+static void verify_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw_dev_id *devid = &cdev->private->dev_id;
 
-	/* Get current pam. */
-	if (cio_update_schib(sch)) {
-		ccw_device_verify_done(cdev, -ENODEV);
-		return;
+	sch->vpm = 0;
+	/* Initialize request data. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = 0x80;
+	if (cdev->private->flags.pgroup) {
+		CIO_TRACE_EVENT(4, "snid");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->callback = snid_callback;
+		snid_do(cdev);
+	} else {
+		CIO_TRACE_EVENT(4, "nop");
+		CIO_HEX_EVENT(4, devid, sizeof(*devid));
+		req->filter = nop_filter;
+		req->callback = nop_callback;
+		nop_do(cdev);
 	}
-	/* After 60s path verification is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
-	__ccw_device_verify_start(cdev);
 }
 
-static void
-__ccw_device_disband_start(struct ccw_device *cdev)
+/**
+ * ccw_device_verify_start - perform path verification
+ * @cdev: ccw device
+ *
+ * Perform an I/O on each available channel path to @cdev to determine which
+ * paths are operational. The resulting path mask is stored in sch->vpm.
+ * If device options specify pathgrouping, establish a pathgroup for the
+ * operational paths. When finished, call ccw_device_verify_done with a
+ * return code specifying the result.
+ */
+void ccw_device_verify_start(struct ccw_device *cdev)
 {
-	struct subchannel *sch;
-	int ret;
-
-	sch = to_subchannel(cdev->dev.parent);
-	while (cdev->private->imask != 0) {
-		if (sch->lpm & cdev->private->imask) {
-			ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND);
-			if (ret == 0)
-				return;
-		}
-		cdev->private->iretry = 5;
-		cdev->private->imask >>= 1;
-	}
-	ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+	CIO_TRACE_EVENT(4, "vrfy");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Initialize PGID data. */
+	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
+	cdev->private->pgid_valid_mask = 0;
+	/*
+	 * Initialize pathgroup and multipath state with target values.
+	 * They may change in the course of path verification.
+	 */
+	cdev->private->flags.pgroup = cdev->private->options.pgroup;
+	cdev->private->flags.mpath = cdev->private->options.mpath;
+	cdev->private->flags.doverify = 0;
+	verify_start(cdev);
 }
 
 /*
- * Got interrupt for Unset Path Group ID.
+ * Process disband SET PGID request result.
  */
-void
-ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
+static void disband_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	struct subchannel *sch;
-	struct irb *irb;
-	int ret;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_dev_id *id = &cdev->private->dev_id;
+
+	if (rc)
+		goto out;
+	/* Ensure consistent multipathing state at device and channel. */
+	cdev->private->flags.mpath = 0;
+	if (sch->config.mp) {
+		sch->config.mp = 0;
+		rc = cio_commit_config(sch);
+	}
+out:
+	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
+		      rc);
+	ccw_device_disband_done(cdev, rc);
+}
 
-	irb = (struct irb *) __LC_IRB;
+/**
+ * ccw_device_disband_start - disband pathgroup
+ * @cdev: ccw device
+ *
+ * Execute a SET PGID channel program on @cdev to disband a previously
+ * established pathgroup. When finished, call ccw_device_disband_done with
+ * a return code specifying the result.
+ */
+void ccw_device_disband_start(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	u8 fn;
+
+	CIO_TRACE_EVENT(4, "disb");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->callback = disband_callback;
+	fn = SPID_FUNC_DISBAND;
+	if (cdev->private->flags.mpath)
+		fn |= SPID_FUNC_MULTI_PATH;
+	spid_build_cp(cdev, fn);
+	ccw_request_start(cdev);
+}
 
-	if (irb->scsw.cmd.stctl ==
-	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (__ccw_device_should_retry(&irb->scsw))
-			__ccw_device_disband_start(cdev);
-		return;
-	}
-	if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
-		return;
-	sch = to_subchannel(cdev->dev.parent);
-	ret = __ccw_device_check_pgid(cdev);
-	memset(&cdev->private->irb, 0, sizeof(struct irb));
-	switch (ret) {
-	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
-	case 0: /* disband successful. */
-		ccw_device_disband_done(cdev, ret);
-		break;
-	case -EOPNOTSUPP:
-		/*
-		 * One of those strange devices which claim to be able
-		 * to do multipathing but not for Unset Path Group ID.
-		 */
-		cdev->private->flags.pgid_single = 1;
-		/* fall through. */
-	case -EAGAIN: /* Try again. */
-		__ccw_device_disband_start(cdev);
-		break;
-	case -ETIME: /* Set path group id stopped by timeout. */
-		ccw_device_disband_done(cdev, -ETIME);
-		break;
-	case -EACCES: /* channel is not operational. */
-		cdev->private->imask >>= 1;
-		cdev->private->iretry = 5;
-		__ccw_device_disband_start(cdev);
-		break;
-	}
+static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct ccw1 *cp = cdev->private->iccws;
+
+	cp[0].cmd_code = CCW_CMD_STLCK;
+	cp[0].cda = (u32) (addr_t) buf1;
+	cp[0].count = 32;
+	cp[0].flags = CCW_FLAG_CC;
+	cp[1].cmd_code = CCW_CMD_RELEASE;
+	cp[1].cda = (u32) (addr_t) buf2;
+	cp[1].count = 32;
+	cp[1].flags = 0;
+	req->cp = cp;
 }
 
-void
-ccw_device_disband_start(struct ccw_device *cdev)
+static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
 {
-	/* After 60s disbanding is considered to have failed. */
-	ccw_device_set_timeout(cdev, 60*HZ);
+	ccw_device_stlck_done(cdev, data, rc);
+}
 
-	cdev->private->flags.pgid_single = 0;
-	cdev->private->iretry = 5;
-	cdev->private->imask = 0x80;
-	__ccw_device_disband_start(cdev);
+/**
+ * ccw_device_stlck_start - perform unconditional release
+ * @cdev: ccw device
+ * @data: data pointer to be passed to ccw_device_stlck_done
+ * @buf1: data pointer used in channel program
+ * @buf2: data pointer used in channel program
+ *
+ * Execute a channel program on @cdev to release an existing PGID reservation.
+ * When finished, call ccw_device_stlck_done with a return code specifying the
+ * result.
+ */
+void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
+			    void *buf2)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	CIO_TRACE_EVENT(4, "stlck");
+	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+	/* Request setup. */
+	memset(req, 0, sizeof(*req));
+	req->timeout = PGID_TIMEOUT;
+	req->maxretries = PGID_RETRIES;
+	req->lpm = sch->schib.pmcw.pam & sch->opm;
+	req->data = data;
+	req->callback = stlck_callback;
+	stlck_build_cp(cdev, buf1, buf2);
+	ccw_request_start(cdev);
 }
+
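
The rewritten PGID code above funnels everything through the new ccwreq
infrastructure: build the channel program into cdev->private->iccws, fill in
the ccw_request embedded in ccw_device_private, and hand off to
ccw_request_start(), which owns retries, timeouts and path stepping. A
minimal sketch of that pattern, assuming the cio-internal definitions from
this series (the my_op_* names are hypothetical):

/*
 * Sketch only: mirrors verify_start()/ccw_device_disband_start() above.
 * CCW_CMD_NOOP stands in for a real command; PGID_TIMEOUT and PGID_RETRIES
 * are the request constants used by device_pgid.c.
 */
static void my_op_callback(struct ccw_device *cdev, void *data, int rc)
{
	/* rc == 0: the channel program completed on at least one path. */
}

static void my_op_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	/* Single-CCW channel program. */
	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	/* Request setup: always start from a zeroed request. */
	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;	/* candidate paths */
	req->callback = my_op_callback;
	ccw_request_start(cdev);	/* retry/path logic lives in ccwreq.c */
}
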
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee2410..66d8066ef22a 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
 	sense_ccw->count = SENSE_MAX_COUNT;
 	sense_ccw->flags = CCW_FLAG_SLI;
 
-	/* Reset internal retry indication. */
-	cdev->private->flags.intretry = 0;
-
 	rc = cio_start(sch, sense_ccw, 0xff);
 	if (rc == -ENODEV || rc == -EACCES)
 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20e..d72ae4c93af9 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
 #ifndef S390_IO_SCH_H
 #define S390_IO_SCH_H
 
+#include <linux/types.h>
 #include <asm/schid.h>
+#include <asm/ccwdev.h>
+#include "css.h"
 
 /*
  * command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
 #define MAX_CIWS 8
 
 /*
+ * Possible status values for a CCW request's I/O.
+ */
+enum io_status {
+	IO_DONE,
+	IO_RUNNING,
+	IO_STATUS_ERROR,
+	IO_PATH_ERROR,
+	IO_REJECTED,
+	IO_KILLED
+};
+
+/**
+ * ccw_request - Internal CCW request.
+ * @cp: channel program to start
+ * @timeout: maximum allowable time in jiffies between start I/O and interrupt
+ * @maxretries: number of retries per I/O operation and path
+ * @lpm: mask of paths to use
+ * @check: optional callback that determines if results are final
+ * @filter: optional callback to adjust request status based on IRB data
+ * @callback: final callback
+ * @data: user-defined pointer passed to all callbacks
+ * @mask: current path mask
+ * @retries: current number of retries
+ * @drc: delayed return code
+ * @cancel: non-zero if request was cancelled
+ * @done: non-zero if request was finished
+ */
+struct ccw_request {
+	struct ccw1 *cp;
+	unsigned long timeout;
+	u16 maxretries;
+	u8 lpm;
+	int (*check)(struct ccw_device *, void *);
+	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
+				 enum io_status);
+	void (*callback)(struct ccw_device *, void *, int);
+	void *data;
+	/* These fields are used internally. */
+	u16 mask;
+	u16 retries;
+	int drc;
+	int cancel:1;
+	int done:1;
+} __attribute__((packed));
+
+/*
  * sense-id response buffer layout
  */
 struct senseid {
@@ -82,32 +131,43 @@ struct senseid {
 	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
 } __attribute__ ((packed, aligned(4)));
 
+enum cdev_todo {
+	CDEV_TODO_NOTHING,
+	CDEV_TODO_ENABLE_CMF,
+	CDEV_TODO_REBIND,
+	CDEV_TODO_REGISTER,
+	CDEV_TODO_UNREG,
+	CDEV_TODO_UNREG_EVAL,
+};
+
 struct ccw_device_private {
 	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int state;		/* device state */
 	atomic_t onoff;
-	unsigned long registered;
 	struct ccw_dev_id dev_id;	/* device id */
 	struct subchannel_id schid;	/* subchannel number */
-	u8 imask;		/* lpm mask for SNID/SID/SPGID */
-	int iretry;		/* retry counter SNID/SID/SPGID */
+	struct ccw_request req;	/* internal I/O request */
+	int iretry;
+	u8 pgid_valid_mask;	/* mask of valid PGIDs */
 	struct {
 		unsigned int fast:1;	/* post with "channel end" */
 		unsigned int repall:1;	/* report every interrupt status */
 		unsigned int pgroup:1;	/* do path grouping */
 		unsigned int force:1;	/* allow forced online */
+		unsigned int mpath:1;	/* do multipathing */
 	} __attribute__ ((packed)) options;
 	struct {
-		unsigned int pgid_single:1; /* use single path for Set PGID */
 		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
 		unsigned int dosense:1;	    /* delayed SENSE required */
 		unsigned int doverify:1;    /* delayed path verification */
 		unsigned int donotify:1;    /* call notify function */
 		unsigned int recog_done:1;  /* dev. recog. complete */
 		unsigned int fake_irb:1;    /* deliver faked irb */
-		unsigned int intretry:1;    /* retry internal operation */
 		unsigned int resuming:1;    /* recognition while resume */
+		unsigned int pgroup:1;	    /* pathgroup is set up */
+		unsigned int mpath:1;	    /* multipathing is set up */
+		unsigned int initialized:1; /* set if initial reference held */
 	} __attribute__((packed)) flags;
 	unsigned long intparm;	/* user interruption parameter */
 	struct qdio_irq *qdio_data;
@@ -115,7 +175,8 @@ struct ccw_device_private {
 	struct senseid senseid;	/* SenseID info */
 	struct pgid pgid[8];	/* path group IDs per chpid*/
 	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
-	struct work_struct kick_work;
+	struct work_struct todo_work;
+	enum cdev_todo todo;
 	wait_queue_head_t wait_q;
 	struct timer_list timer;
 	void *cmb;			/* measurement information */
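
The io_status values and the filter hook declared above define the contract
between a requester and the generic ccwreq code: the core derives a
preliminary status from the IRB, and the optional filter may reinterpret it
before the retry/completion decision. A hedged sketch of such a callback
(my_filter is hypothetical; the real filters live in device_pgid.c):

/*
 * Sketch of the filter contract only. Returning IO_DONE finishes the
 * request, IO_RUNNING keeps it pending, and the error states feed the
 * retry and path-stepping logic in ccwreq.c.
 */
static enum io_status my_filter(struct ccw_device *cdev, void *data,
				struct irb *irb, enum io_status status)
{
	if (status != IO_DONE)
		return status;		/* let the core handle errors */
	if (irb->scsw.cmd.cstat != 0)
		return IO_STATUS_ERROR;	/* demote on channel status */
	return IO_DONE;
}
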
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 1b78f639ead3..76769978285f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
 					filp->f_path.dentry->d_inode->i_private);
 }
 
-static struct file_operations debugfs_fops = {
+static const struct file_operations debugfs_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = qstat_seq_open,
 	.read	 = seq_read,
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index eff943923c6f..968e3c7c2632 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
 	return single_open(filp, qdio_perf_proc_show, NULL);
 }
 
-static struct file_operations qdio_perf_proc_fops = {
+static const struct file_operations qdio_perf_proc_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = qdio_perf_seq_open,
 	.read	 = seq_read,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 1294876bf7b4..20836eff88c5 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -102,6 +102,7 @@ static atomic_t ap_poll_requests = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
 static struct task_struct *ap_poll_kthread = NULL;
 static DEFINE_MUTEX(ap_poll_thread_mutex);
+static DEFINE_SPINLOCK(ap_poll_timer_lock);
 static void *ap_interrupt_indicator;
 static struct hrtimer ap_poll_timer;
 /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
@@ -282,6 +283,7 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
  * @psmid: The program supplied message identifier
  * @msg: The message text
  * @length: The message length
+ * @special: Special Bit
  *
  * Returns AP queue status structure.
  * Condition code 1 on NQAP can't happen because the L bit is 1.
@@ -289,7 +291,8 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
  * because a segment boundary was reached. The NQAP is repeated.
  */
 static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
+	  unsigned int special)
 {
 	typedef struct { char _[length]; } msgblock;
 	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
@@ -299,6 +302,9 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
 	register unsigned long reg5 asm ("5") = (unsigned int) psmid;
 
+	if (special == 1)
+		reg0 |= 0x400000UL;
+
 	asm volatile (
 		"0: .long 0xb2ad0042\n"		/* DQAP */
 		"   brc   2,0b"
@@ -312,13 +318,15 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
 {
 	struct ap_queue_status status;
 
-	status = __ap_send(qid, psmid, msg, length);
+	status = __ap_send(qid, psmid, msg, length, 0);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		return 0;
 	case AP_RESPONSE_Q_FULL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
+		return -EINVAL;
 	default:	/* Device is gone. */
 		return -ENODEV;
 	}
@@ -1008,7 +1016,7 @@ static int ap_probe_device_type(struct ap_device *ap_dev)
 	}
 
 	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
-			   msg, sizeof(msg));
+			   msg, sizeof(msg), 0);
 	if (status.response_code != AP_RESPONSE_NORMAL) {
 		rc = -ENODEV;
 		goto out_free;
@@ -1163,16 +1171,19 @@ ap_config_timeout(unsigned long ptr)
 static inline void ap_schedule_poll_timer(void)
 {
 	ktime_t hr_time;
+
+	spin_lock_bh(&ap_poll_timer_lock);
 	if (ap_using_interrupts() || ap_suspend_flag)
-		return;
+		goto out;
 	if (hrtimer_is_queued(&ap_poll_timer))
-		return;
+		goto out;
 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
 		hr_time = ktime_set(0, poll_timeout);
 		hrtimer_forward_now(&ap_poll_timer, hr_time);
 		hrtimer_restart(&ap_poll_timer);
 	}
-	return;
+out:
+	spin_unlock_bh(&ap_poll_timer_lock);
 }
 
 /**
@@ -1243,7 +1254,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 	/* Start the next request on the queue. */
 	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
 	status = __ap_send(ap_dev->qid, ap_msg->psmid,
-			   ap_msg->message, ap_msg->length);
+			   ap_msg->message, ap_msg->length, ap_msg->special);
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
 		atomic_inc(&ap_poll_requests);
@@ -1261,6 +1272,7 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 		*flags |= 2;
 		break;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
 		return -EINVAL;
 	default:
 		return -ENODEV;
@@ -1302,7 +1314,8 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
 	if (list_empty(&ap_dev->requestq) &&
 	    ap_dev->queue_count < ap_dev->queue_depth) {
 		status = __ap_send(ap_dev->qid, ap_msg->psmid,
-				   ap_msg->message, ap_msg->length);
+				   ap_msg->message, ap_msg->length,
+				   ap_msg->special);
 		switch (status.response_code) {
 		case AP_RESPONSE_NORMAL:
 			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
@@ -1317,6 +1330,7 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
 		ap_dev->requestq_count++;
 		ap_dev->total_request_count++;
 		return -EBUSY;
+	case AP_RESPONSE_REQ_FAC_NOT_INST:
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
 		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
 		return -EINVAL;
@@ -1658,6 +1672,7 @@ int __init ap_module_init(void)
 	 */
 	if (MACHINE_IS_VM)
 		poll_timeout = 1500000;
+	spin_lock_init(&ap_poll_timer_lock);
 	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	ap_poll_timer.function = ap_poll_timeout;
 
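
The new ap_poll_timer_lock matters because hrtimer_is_queued() plus the
forward/restart pair is not atomic: two contexts could both observe the timer
idle and both re-arm it. A compact sketch of the locked check-and-arm pattern,
detached from the AP specifics (my_timer/my_schedule_timer are hypothetical):

/*
 * Sketch, assuming a timer that may be armed from both process and softirq
 * context (hence the _bh lock). Mirrors ap_schedule_poll_timer() above.
 */
static DEFINE_SPINLOCK(my_timer_lock);
static struct hrtimer my_timer;

static void my_schedule_timer(ktime_t delay)
{
	spin_lock_bh(&my_timer_lock);
	if (!hrtimer_is_queued(&my_timer) &&
	    ktime_to_ns(hrtimer_expires_remaining(&my_timer)) <= 0) {
		hrtimer_forward_now(&my_timer, delay);
		hrtimer_restart(&my_timer);
	}
	spin_unlock_bh(&my_timer_lock);
}
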
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index a35362241805..4785d07cd447 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -87,6 +87,7 @@ struct ap_queue_status {
 #define AP_RESPONSE_INDEX_TOO_BIG	0x11
 #define AP_RESPONSE_NO_FIRST_PART	0x13
 #define AP_RESPONSE_MESSAGE_TOO_BIG	0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST	0x16
 
 /*
  * Known device types
@@ -96,8 +97,8 @@ struct ap_queue_status {
 #define AP_DEVICE_TYPE_PCIXCC	5
 #define AP_DEVICE_TYPE_CEX2A	6
 #define AP_DEVICE_TYPE_CEX2C	7
-#define AP_DEVICE_TYPE_CEX2A2	8
-#define AP_DEVICE_TYPE_CEX2C2	9
+#define AP_DEVICE_TYPE_CEX3A	8
+#define AP_DEVICE_TYPE_CEX3C	9
 
 /*
  * AP reset flag states
@@ -161,12 +162,25 @@ struct ap_message {
 	size_t length;		/* Message length. */
 
 	void *private;		/* ap driver private pointer. */
+	unsigned int special:1;	/* Used for special commands. */
 };
 
 #define AP_DEVICE(dt)					\
 	.dev_type=(dt),					\
 	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
 
+/**
+ * ap_init_message() - Initialize ap_message.
+ * Initialize a message before using. Otherwise this might result in
+ * unexpected behaviour.
+ */
+static inline void ap_init_message(struct ap_message *ap_msg)
+{
+	ap_msg->psmid = 0;
+	ap_msg->length = 0;
+	ap_msg->special = 0;
+}
+
 /*
  * Note: don't use ap_send/ap_recv after using ap_queue_message
  * for the first time. Otherwise the ap message queue will get
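
Since struct ap_message now carries the special bit in addition to
psmid/length, stack-allocated messages must be initialized before use; that
is what ap_init_message() is for, and why every zcrypt request function below
gains a call to it. A minimal usage sketch (my_send and MY_MSG_SIZE are
hypothetical):

/*
 * Sketch of the pattern the zcrypt conversions follow. Fields not reset by
 * ap_init_message() (message, private, ...) still need explicit setup.
 */
static int my_send(struct ap_device *ap_dev, unsigned long long psmid)
{
	struct ap_message ap_msg;

	ap_init_message(&ap_msg);	/* zeroes psmid, length and special */
	ap_msg.message = kmalloc(MY_MSG_SIZE, GFP_KERNEL);
	if (!ap_msg.message)
		return -ENOMEM;
	ap_msg.psmid = psmid;
	ap_msg.length = MY_MSG_SIZE;
	/* ap_msg.special stays 0 unless the request needs the special bit. */
	ap_queue_message(ap_dev, &ap_msg);
	return 0;
}
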
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 65b6a96afe6b..0d4d18bdd45c 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -299,9 +299,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
  */
 static int zcrypt_open(struct inode *inode, struct file *filp)
 {
-	lock_kernel();
 	atomic_inc(&zcrypt_open_count);
-	unlock_kernel();
 	return 0;
 }
 
@@ -1009,6 +1007,10 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
 		       zcrypt_count_type(ZCRYPT_CEX2C));
 	len += sprintf(resp_buff + len, "CEX2A count: %d\n",
 		       zcrypt_count_type(ZCRYPT_CEX2A));
+	len += sprintf(resp_buff + len, "CEX3C count: %d\n",
+		       zcrypt_count_type(ZCRYPT_CEX3C));
+	len += sprintf(resp_buff + len, "CEX3A count: %d\n",
+		       zcrypt_count_type(ZCRYPT_CEX3A));
 	len += sprintf(resp_buff + len, "requestq count: %d\n",
 		       zcrypt_requestq_count());
 	len += sprintf(resp_buff + len, "pendingq count: %d\n",
@@ -1017,7 +1019,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
 		       atomic_read(&zcrypt_open_count));
 	zcrypt_status_mask(workarea);
 	len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
-		      "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
+		      "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
 		      resp_buff+len, workarea, AP_DEVICES);
 	zcrypt_qdepth_mask(workarea);
 	len += sprinthx("Waiting work element counts",
@@ -1095,8 +1097,9 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
 	 * '0' for no device, '1' for PCICA, '2' for PCICC,
 	 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
 	 * '5' for CEX2C and '6' for CEX2A'
+	 * '7' for CEX3C and '8' for CEX3A
 	 */
-		if (*ptr >= '0' && *ptr <= '6')
+		if (*ptr >= '0' && *ptr <= '8')
 			j++;
 		else if (*ptr == 'd' || *ptr == 'D')
 			zcrypt_disable_card(j++);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 1d1ec74dadb2..8e7ffbf2466c 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -71,6 +71,8 @@ struct ica_z90_status {
 #define ZCRYPT_PCIXCC_MCL3	4
 #define ZCRYPT_CEX2C		5
 #define ZCRYPT_CEX2A		6
+#define ZCRYPT_CEX3C		7
+#define ZCRYPT_CEX3A		8
 
 /**
  * Large random numbers are pulled in 4096 byte chunks from the crypto cards
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 326ea08f67c9..c6fb0aa89507 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -39,17 +39,24 @@
 
 #define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3A_MIN_MOD_SIZE	CEX2A_MIN_MOD_SIZE
+#define CEX3A_MAX_MOD_SIZE	CEX2A_MAX_MOD_SIZE
 
 #define CEX2A_SPEED_RATING	970
+#define CEX3A_SPEED_RATING	900 /* Fixme: Needs finetuning */
 
 #define CEX2A_MAX_MESSAGE_SIZE	0x390	/* sizeof(struct type50_crb2_msg)    */
 #define CEX2A_MAX_RESPONSE_SIZE 0x110	/* max outputdatalength + type80_hdr */
 
+#define CEX3A_MAX_MESSAGE_SIZE	CEX2A_MAX_MESSAGE_SIZE
+#define CEX3A_MAX_RESPONSE_SIZE	CEX2A_MAX_RESPONSE_SIZE
+
 #define CEX2A_CLEANUP_TIME	(15*HZ)
+#define CEX3A_CLEANUP_TIME	CEX2A_CLEANUP_TIME
 
 static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3A) },
 	{ /* end of list */ },
 };
 
@@ -298,6 +305,7 @@ static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -335,6 +343,7 @@ static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -373,31 +382,45 @@ static struct zcrypt_ops zcrypt_cex2a_ops = {
  */
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 {
-	struct zcrypt_device *zdev;
-	int rc;
+	struct zcrypt_device *zdev = NULL;
+	int rc = 0;
 
-	zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
-	if (!zdev)
-		return -ENOMEM;
-	zdev->ap_dev = ap_dev;
-	zdev->ops = &zcrypt_cex2a_ops;
-	zdev->online = 1;
-	zdev->user_space_type = ZCRYPT_CEX2A;
-	zdev->type_string = "CEX2A";
-	zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
-	zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
-	zdev->short_crt = 1;
-	zdev->speed_rating = CEX2A_SPEED_RATING;
-	ap_dev->reply = &zdev->reply;
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
-	if (rc)
-		goto out_free;
-	return 0;
-
-out_free:
-	ap_dev->private = NULL;
-	zcrypt_device_free(zdev);
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_CEX2A:
+		zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX2A;
+		zdev->type_string = "CEX2A";
+		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX2A_SPEED_RATING;
+		break;
+	case AP_DEVICE_TYPE_CEX3A:
+		zdev = zcrypt_device_alloc(CEX3A_MAX_RESPONSE_SIZE);
+		if (!zdev)
+			return -ENOMEM;
+		zdev->user_space_type = ZCRYPT_CEX3A;
+		zdev->type_string = "CEX3A";
+		zdev->min_mod_size = CEX3A_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
+		zdev->short_crt = 1;
+		zdev->speed_rating = CEX3A_SPEED_RATING;
+		break;
+	}
+	if (zdev != NULL) {
+		zdev->ap_dev = ap_dev;
+		zdev->ops = &zcrypt_cex2a_ops;
+		zdev->online = 1;
+		ap_dev->reply = &zdev->reply;
+		ap_dev->private = zdev;
+		rc = zcrypt_device_register(zdev);
+	}
+	if (rc) {
+		ap_dev->private = NULL;
+		zcrypt_device_free(zdev);
+	}
 	return rc;
 }
 
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 17ba81b58c78..e78df3671caf 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -281,6 +281,7 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -318,6 +319,7 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index f4b0c4795434..a23726a0735c 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -483,6 +483,7 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -521,6 +522,7 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
 	struct completion work;
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index c20d4790258e..79c120578e61 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -43,10 +43,13 @@
 #define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
 #define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
 #define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+#define CEX3C_MIN_MOD_SIZE	PCIXCC_MIN_MOD_SIZE
+#define CEX3C_MAX_MOD_SIZE	PCIXCC_MAX_MOD_SIZE
 
-#define PCIXCC_MCL2_SPEED_RATING	7870	/* FIXME: needs finetuning */
+#define PCIXCC_MCL2_SPEED_RATING	7870
 #define PCIXCC_MCL3_SPEED_RATING	7870
-#define CEX2C_SPEED_RATING		8540
+#define CEX2C_SPEED_RATING		7000
+#define CEX3C_SPEED_RATING		6500	/* FIXME: needs finetuning */
 
 #define PCIXCC_MAX_ICA_MESSAGE_SIZE	0x77c	/* max size type6 v2 crt message */
 #define PCIXCC_MAX_ICA_RESPONSE_SIZE	0x77c	/* max size type86 v2 reply	 */
@@ -72,7 +75,7 @@ struct response_type {
 static struct ap_device_id zcrypt_pcixcc_ids[] = {
 	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
 	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
-	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C2) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX3C) },
 	{ /* end of list */ },
 };
 
@@ -326,6 +329,11 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
 	function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
 	memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
 
+	if (memcmp(function_code, "US", 2) == 0)
+		ap_msg->special = 1;
+	else
+		ap_msg->special = 0;
+
 	/* copy data block */
 	if (xcRB->request_data_length &&
 	    copy_from_user(req_data, xcRB->request_data_address,
@@ -361,7 +369,7 @@ static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
 		.ToCardLen1	= sizeof *msg - sizeof(msg->hdr),
 		.FromCardLen1	= sizeof *msg - sizeof(msg->hdr),
 	};
-	static struct CPRBX static_cprbx = {
+	static struct CPRBX local_cprbx = {
 		.cprb_len	= 0x00dc,
 		.cprb_ver_id	= 0x02,
 		.func_id	= {0x54, 0x32},
@@ -372,7 +380,7 @@ static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
 
 	msg->hdr = static_type6_hdrX;
 	msg->hdr.FromCardLen2 = random_number_length,
-	msg->cprbx = static_cprbx;
+	msg->cprbx = local_cprbx;
 	msg->cprbx.rpl_datal = random_number_length,
 	msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
 	memcpy(msg->function_code, msg->hdr.function_code, 0x02);
@@ -561,7 +569,8 @@ static int convert_response_ica(struct zcrypt_device *zdev,
 	if (msg->cprbx.cprb_ver_id == 0x02)
 		return convert_type86_ica(zdev, reply,
 					  outputdata, outputdatalength);
-	/* no break, incorrect cprb version is an unknown response */
+	/* Fall through, no break, incorrect cprb version is an unknown
+	 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
@@ -587,7 +596,8 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
 	}
 	if (msg->cprbx.cprb_ver_id == 0x02)
 		return convert_type86_xcrb(zdev, reply, xcRB);
-	/* no break, incorrect cprb version is an unknown response */
+	/* Fall through, no break, incorrect cprb version is an unknown
+	 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
 		zdev->online = 0;
@@ -610,7 +620,8 @@ static int convert_response_rng(struct zcrypt_device *zdev,
 		return -EINVAL;
 	if (msg->cprbx.cprb_ver_id == 0x02)
 		return convert_type86_rng(zdev, reply, data);
-	/* no break, incorrect cprb version is an unknown response */
+	/* Fall through, no break, incorrect cprb version is an unknown
+	 * response */
 	default: /* Unknown response type, this should NEVER EVER happen */
 		zdev->online = 0;
 		return -EAGAIN;	/* repeat the request on a different device. */
@@ -685,6 +696,7 @@ static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -724,6 +736,7 @@ static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -763,6 +776,7 @@ static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -802,6 +816,7 @@ static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
 	};
 	int rc;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -969,6 +984,7 @@ static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
 	} __attribute__((packed)) *reply;
 	int rc, i;
 
+	ap_init_message(&ap_msg);
 	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!ap_msg.message)
 		return -ENOMEM;
@@ -1013,14 +1029,15 @@ out_free:
 static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 {
 	struct zcrypt_device *zdev;
-	int rc;
+	int rc = 0;
 
 	zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
 	if (!zdev)
 		return -ENOMEM;
 	zdev->ap_dev = ap_dev;
 	zdev->online = 1;
-	if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
+	switch (ap_dev->device_type) {
+	case AP_DEVICE_TYPE_PCIXCC:
 		rc = zcrypt_pcixcc_mcl(ap_dev);
 		if (rc < 0) {
 			zcrypt_device_free(zdev);
@@ -1038,13 +1055,25 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
 			zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 			zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
 		}
-	} else {
+		break;
+	case AP_DEVICE_TYPE_CEX2C:
 		zdev->user_space_type = ZCRYPT_CEX2C;
 		zdev->type_string = "CEX2C";
 		zdev->speed_rating = CEX2C_SPEED_RATING;
 		zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
 		zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
+		break;
+	case AP_DEVICE_TYPE_CEX3C:
+		zdev->user_space_type = ZCRYPT_CEX3C;
+		zdev->type_string = "CEX3C";
+		zdev->speed_rating = CEX3C_SPEED_RATING;
+		zdev->min_mod_size = CEX3C_MIN_MOD_SIZE;
+		zdev->max_mod_size = CEX3C_MAX_MOD_SIZE;
+		break;
+	default:
+		goto out_free;
 	}
+
 	rc = zcrypt_pcixcc_rng_supported(ap_dev);
 	if (rc < 0) {
 		zcrypt_device_free(zdev);
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..6cab5a62f99e 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
 #
 
 ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
-obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
 obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
-obj-$(CONFIG_LCS) += lcs.o cu3088.o
-obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
 qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
 obj-$(CONFIG_QETH) += qeth.o
 qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..3c77bfe0764c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
 #include <linux/timer.h>
 #include <linux/types.h>
 
-#include "cu3088.h"
 #include "claw.h"
 
 /*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
 	return -EPERM;
 }
 
+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
 /* ccwgroup table */
 
 static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
 	.prepare     = claw_pm_prepare,
 };
 
+static struct ccw_device_id claw_ids[] = {
+	{CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+	{},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+	.owner	= THIS_MODULE,
+	.name	= "claw",
+	.ids	= claw_ids,
+	.probe	= ccwgroup_probe_ccwdev,
+	.remove	= ccwgroup_remove_ccwdev,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+			size_t count)
+{
+	int err;
+	err = ccwgroup_create_from_string(claw_root_dev,
+					  claw_group_driver.driver_id,
+					  &claw_ccw_driver, 3, buf);
+	return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+	&driver_attr_group.attr,
+	NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+	.attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+	&claw_group_attr_group,
+	NULL,
+};
+
 /*
 *	Key functions
 */
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
 static void __exit
 claw_cleanup(void)
 {
-	unregister_cu3088_discipline(&claw_group_driver);
+	driver_remove_file(&claw_group_driver.driver,
+			   &driver_attr_group);
+	ccwgroup_driver_unregister(&claw_group_driver);
+	ccw_driver_unregister(&claw_ccw_driver);
+	root_device_unregister(claw_root_dev);
 	claw_unregister_debug_facility();
 	pr_info("Driver unloaded\n");
 
@@ -3348,16 +3395,31 @@ claw_init(void)
 	if (ret) {
 		pr_err("Registering with the S/390 debug feature"
 			" failed with error code %d\n", ret);
-		return ret;
+		goto out_err;
 	}
 	CLAW_DBF_TEXT(2, setup, "init_mod");
-	ret = register_cu3088_discipline(&claw_group_driver);
-	if (ret) {
-		CLAW_DBF_TEXT(2, setup, "init_bad");
-		claw_unregister_debug_facility();
-		pr_err("Registering with the cu3088 device driver failed "
-		       "with error code %d\n", ret);
-	}
+	claw_root_dev = root_device_register("qeth");
+	ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+	if (ret)
+		goto register_err;
+	ret = ccw_driver_register(&claw_ccw_driver);
+	if (ret)
+		goto ccw_err;
+	claw_group_driver.driver.groups = claw_group_attr_groups;
+	ret = ccwgroup_driver_register(&claw_group_driver);
+	if (ret)
+		goto ccwgroup_err;
+	return 0;
+
+ccwgroup_err:
+	ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+	root_device_unregister(claw_root_dev);
+register_err:
+	CLAW_DBF_TEXT(2, setup, "init_bad");
+	claw_unregister_debug_facility();
+out_err:
+	pr_err("Initializing the claw device driver failed\n");
 	return ret;
 }
 
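
With cu3088 gone, claw registers a plain ccw_driver and a write-only "group"
driver attribute; grouping is now triggered from userspace rather than by a
cu3088 discipline. Assumed usage, derived from the registration code above
(the bus IDs are placeholders):

/*
 * Writing the read, write and data channel bus IDs to the new attribute,
 * e.g.
 *	echo 0.0.0100,0.0.0101,0.0.0102 > \
 *		/sys/bus/ccwgroup/drivers/claw/group
 * invokes claw_driver_group_store(), which calls
 * ccwgroup_create_from_string(claw_root_dev, ..., 3, buf) to bind the
 * three ccw devices into a single CLAW ccwgroup device.
 */
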
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
 	} \
 	} while (0)
 
+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+	/* Device is not a channel */
+	claw_channel_type_none,
+
+	/* Device is a CLAW channel device */
+	claw_channel_type_claw
+};
+
+
 /*******************************************************
 *	Define Control Blocks				*
 *							*
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 
 #include "ctcm_dbug.h"
 #include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
 #include <asm/idals.h>
 
 #include "fsm.h"
-#include "cu3088.h"
 #include "ctcm_main.h"
 
 /*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c5b83874500c..e35713dd0504 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
 
 #include <asm/idals.h>
 
-#include "cu3088.h"
 #include "ctcm_fsms.h"
 #include "ctcm_main.h"
 
 /* Some common global variables */
 
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
 /*
  * Linked list of all detected channels.
  */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
  *
  * returns Pointer to a channel or NULL if no matching channel available.
  */
-static struct channel *channel_get(enum channel_types type,
+static struct channel *channel_get(enum ctcm_channel_types type,
 					char *id, int direction)
 {
 	struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
  *
  * returns 0 on success, !0 on error.
  */
-static int add_channel(struct ccw_device *cdev, enum channel_types type,
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
 				struct ctcm_priv *priv)
 {
 	struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return: /* note that all channel pointers are 0 or valid */
 /*
  * Return type of a detected device.
  */
-static enum channel_types get_channel_type(struct ccw_device_id *id)
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
 {
-	enum channel_types type;
-	type = (enum channel_types)id->driver_info;
+	enum ctcm_channel_types type;
+	type = (enum ctcm_channel_types)id->driver_info;
 
-	if (type == channel_type_ficon)
-		type = channel_type_escon;
+	if (type == ctcm_channel_type_ficon)
+		type = ctcm_channel_type_escon;
 
 	return type;
 }
@@ -1525,16 +1529,21 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	char read_id[CTCM_ID_SIZE];
 	char write_id[CTCM_ID_SIZE];
 	int direction;
-	enum channel_types type;
+	enum ctcm_channel_types type;
 	struct ctcm_priv *priv;
 	struct net_device *dev;
 	struct ccw_device *cdev0;
 	struct ccw_device *cdev1;
+	struct channel *readc;
+	struct channel *writec;
 	int ret;
+	int result;
 
 	priv = dev_get_drvdata(&cgdev->dev);
-	if (!priv)
-		return -ENODEV;
+	if (!priv) {
+		result = -ENODEV;
+		goto out_err_result;
+	}
 
 	cdev0 = cgdev->cdev[0];
 	cdev1 = cgdev->cdev[1];
@@ -1545,31 +1554,40 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
 
 	ret = add_channel(cdev0, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_err_result;
+	}
 	ret = add_channel(cdev1, type, priv);
-	if (ret)
-		return ret;
+	if (ret) {
+		result = ret;
+		goto out_remove_channel1;
+	}
 
 	ret = ccw_device_set_online(cdev0);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, read_id, ret);
+		result = -EIO;
+		goto out_remove_channel2;
 	}
 
 	ret = ccw_device_set_online(cdev1);
 	if (ret != 0) {
-		/* may be ok to fail now - can be done later */
 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
 			"%s(%s) set_online rc=%d",
 				CTCM_FUNTAIL, write_id, ret);
+
+		result = -EIO;
+		goto out_ccw1;
 	}
 
 	dev = ctcm_init_netdevice(priv);
-	if (dev == NULL)
-		goto out;
+	if (dev == NULL) {
+		result = -ENODEV;
+		goto out_ccw2;
+	}
 
 	for (direction = READ; direction <= WRITE; direction++) {
 		priv->channel[direction] =
@@ -1587,12 +1605,14 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
 	/* sysfs magic */
 	SET_NETDEV_DEV(dev, &cgdev->dev);
 
-	if (register_netdev(dev))
-		goto out_dev;
+	if (register_netdev(dev)) {
+		result = -ENODEV;
+		goto out_dev;
+	}
 
 	if (ctcm_add_attributes(&cgdev->dev)) {
-		unregister_netdev(dev);
-		goto out_dev;
+		result = -ENODEV;
+		goto out_unregister;
 	}
 
 	strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
@@ -1608,13 +1628,22 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1608 priv->channel[WRITE]->id, priv->protocol); 1628 priv->channel[WRITE]->id, priv->protocol);
1609 1629
1610 return 0; 1630 return 0;
1631out_unregister:
1632 unregister_netdev(dev);
1611out_dev: 1633out_dev:
1612 ctcm_free_netdevice(dev); 1634 ctcm_free_netdevice(dev);
1613out: 1635out_ccw2:
1614 ccw_device_set_offline(cgdev->cdev[1]); 1636 ccw_device_set_offline(cgdev->cdev[1]);
1637out_ccw1:
1615 ccw_device_set_offline(cgdev->cdev[0]); 1638 ccw_device_set_offline(cgdev->cdev[0]);
1616 1639out_remove_channel2:
1617 return -ENODEV; 1640 readc = channel_get(type, read_id, READ);
1641 channel_remove(readc);
1642out_remove_channel1:
1643 writec = channel_get(type, write_id, WRITE);
1644 channel_remove(writec);
1645out_err_result:
1646 return result;
1618} 1647}
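
The rework above is the classic kernel unwind idiom: every acquisition gets a goto label, a failure jumps to the label that releases only what has already succeeded, and the labels appear in reverse order of acquisition so the teardown sequence is written exactly once. A minimal, self-contained sketch of the shape (plain C; the names are illustrative, not the driver's helpers):

#include <stdio.h>

/* stand-ins for add_channel(), ccw_device_set_online(), ... */
static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int demo_new_device(void)
{
	int result;

	if (acquire("channel0")) { result = -1; goto out_err; }
	if (acquire("channel1")) { result = -1; goto out_channel0; }
	if (acquire("netdev")) { result = -1; goto out_channel1; }
	return 0;

out_channel1:
	release("channel1");
out_channel0:
	release("channel0");
out_err:
	return result;
}

int main(void)
{
	return demo_new_device();
}

The payoff shows in the hunks above: set_online failures, previously tolerated with a "may be ok to fail now" comment, can now fail the whole probe cleanly because each failure point knows exactly how much to undo.
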
1619 1648
1620/** 1649/**
@@ -1695,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1695 return 0; 1724 return 0;
1696 netif_device_detach(priv->channel[READ]->netdev); 1725 netif_device_detach(priv->channel[READ]->netdev);
1697 ctcm_close(priv->channel[READ]->netdev); 1726 ctcm_close(priv->channel[READ]->netdev);
1727 if (!wait_event_timeout(priv->fsm->wait_q,
1728 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1729 netif_device_attach(priv->channel[READ]->netdev);
1730 return -EBUSY;
1731 }
1698 ccw_device_set_offline(gdev->cdev[1]); 1732 ccw_device_set_offline(gdev->cdev[1]);
1699 ccw_device_set_offline(gdev->cdev[0]); 1733 ccw_device_set_offline(gdev->cdev[0]);
1700 return 0; 1734 return 0;
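
ctcm_pm_suspend() now blocks until the device FSM reports DEV_STATE_STOPPED; on timeout it re-attaches the network device before returning -EBUSY, so a failed suspend does not leave the interface detached while the channels are still online. A hedged sketch of that ordering, with toy state and field names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

#define DEMO_STATE_STOPPED	0
#define DEMO_TIME_5_SEC		(5 * HZ)

struct demo_priv {
	int state;			/* stands in for fsm_getstate() */
	wait_queue_head_t wait_q;
	struct net_device *netdev;
};

static int demo_suspend(struct demo_priv *priv)
{
	netif_device_detach(priv->netdev);
	/* close the device here */
	if (!wait_event_timeout(priv->wait_q,
				priv->state == DEMO_STATE_STOPPED,
				DEMO_TIME_5_SEC)) {
		netif_device_attach(priv->netdev);	/* undo the detach */
		return -EBUSY;
	}
	/* only now is it safe to set the channels offline */
	return 0;
}
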
@@ -1719,6 +1753,22 @@ err_out:
1719 return rc; 1753 return rc;
1720} 1754}
1721 1755
1756static struct ccw_device_id ctcm_ids[] = {
1757 {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
1758 {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
1759 {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
1760 {},
1761};
1762MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1763
1764static struct ccw_driver ctcm_ccw_driver = {
1765 .owner = THIS_MODULE,
1766 .name = "ctcm",
1767 .ids = ctcm_ids,
1768 .probe = ccwgroup_probe_ccwdev,
1769 .remove = ccwgroup_remove_ccwdev,
1770};
1771
1722static struct ccwgroup_driver ctcm_group_driver = { 1772static struct ccwgroup_driver ctcm_group_driver = {
1723 .owner = THIS_MODULE, 1773 .owner = THIS_MODULE,
1724 .name = CTC_DRIVER_NAME, 1774 .name = CTC_DRIVER_NAME,
@@ -1733,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
1733 .restore = ctcm_pm_resume, 1783 .restore = ctcm_pm_resume,
1734}; 1784};
1735 1785
1786static ssize_t
1787ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
1788 size_t count)
1789{
1790 int err;
1791
1792 err = ccwgroup_create_from_string(ctcm_root_dev,
1793 ctcm_group_driver.driver_id,
1794 &ctcm_ccw_driver, 2, buf);
1795 return err ? err : count;
1796}
1797
1798static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1799
1800static struct attribute *ctcm_group_attrs[] = {
1801 &driver_attr_group.attr,
1802 NULL,
1803};
1804
1805static struct attribute_group ctcm_group_attr_group = {
1806 .attrs = ctcm_group_attrs,
1807};
1808
1809static const struct attribute_group *ctcm_group_attr_groups[] = {
1810 &ctcm_group_attr_group,
1811 NULL,
1812};
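
Instead of calling driver_create_file() after registration, as the removed cu3088 code did, the driver now hangs its "group" attribute off driver.groups (see ctcm_init() below). The driver core then creates and removes the sysfs file together with the driver itself, so userspace can never observe the driver without the attribute. Sketch of the wiring, with illustrative names:

#include <linux/device.h>

static ssize_t demo_group_store(struct device_driver *ddrv,
				const char *buf, size_t count)
{
	return count;	/* would parse "busid0,busid1" and group devices */
}
static DRIVER_ATTR(group, 0200, NULL, demo_group_store);

static struct attribute *demo_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group demo_attr_group = {
	.attrs = demo_attrs,
};
static const struct attribute_group *demo_attr_groups[] = {
	&demo_attr_group,
	NULL,
};
/* before registration: some_driver.driver.groups = demo_attr_groups; */
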
1736 1813
1737/* 1814/*
1738 * Module related routines 1815 * Module related routines
@@ -1746,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
1746 */ 1823 */
1747static void __exit ctcm_exit(void) 1824static void __exit ctcm_exit(void)
1748{ 1825{
1749 unregister_cu3088_discipline(&ctcm_group_driver); 1826 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1827 ccwgroup_driver_unregister(&ctcm_group_driver);
1828 ccw_driver_unregister(&ctcm_ccw_driver);
1829 root_device_unregister(ctcm_root_dev);
1750 ctcm_unregister_dbf_views(); 1830 ctcm_unregister_dbf_views();
1751 pr_info("CTCM driver unloaded\n"); 1831 pr_info("CTCM driver unloaded\n");
1752} 1832}
@@ -1772,17 +1852,31 @@ static int __init ctcm_init(void)
1772 channels = NULL; 1852 channels = NULL;
1773 1853
1774 ret = ctcm_register_dbf_views(); 1854 ret = ctcm_register_dbf_views();
1775 if (ret) { 1855 if (ret)
1776 return ret; 1856 goto out_err;
1777 } 1857 ctcm_root_dev = root_device_register("ctcm");
1778 ret = register_cu3088_discipline(&ctcm_group_driver); 1858 ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
1779 if (ret) { 1859 if (ret)
1780 ctcm_unregister_dbf_views(); 1860 goto register_err;
1781 pr_err("%s / register_cu3088_discipline failed, ret = %d\n", 1861 ret = ccw_driver_register(&ctcm_ccw_driver);
1782 __func__, ret); 1862 if (ret)
1783 return ret; 1863 goto ccw_err;
1784 } 1864 ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
1865 ret = ccwgroup_driver_register(&ctcm_group_driver);
1866 if (ret)
1867 goto ccwgroup_err;
1785 print_banner(); 1868 print_banner();
1869 return 0;
1870
1871ccwgroup_err:
1872 ccw_driver_unregister(&ctcm_ccw_driver);
1873ccw_err:
1874 root_device_unregister(ctcm_root_dev);
1875register_err:
1876 ctcm_unregister_dbf_views();
1877out_err:
1878 pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1879 __func__, ret);
1786 return ret; 1880 return ret;
1787} 1881}
1788 1882
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17 17
18#include "fsm.h" 18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h" 19#include "ctcm_dbug.h"
21#include "ctcm_mpc.h" 20#include "ctcm_mpc.h"
22 21
@@ -66,6 +65,23 @@
66 ctcmpc_dumpit(buf, len); \ 65 ctcmpc_dumpit(buf, len); \
67 } while (0) 66 } while (0)
68 67
68/**
69 * Enum for classifying detected devices
70 */
71enum ctcm_channel_types {
72 /* Device is not a channel */
73 ctcm_channel_type_none,
74
75 /* Device is a CTC/A */
76 ctcm_channel_type_parallel,
77
78 /* Device is a FICON channel */
79 ctcm_channel_type_ficon,
80
81 /* Device is an ESCON channel */
82 ctcm_channel_type_escon
83};
84
69/* 85/*
70 * CCW commands, used in this driver. 86 * CCW commands, used in this driver.
71 */ 87 */
@@ -121,7 +137,7 @@ struct channel {
121 * Type of this channel. 137 * Type of this channel.
122 * CTC/A or Escon for valid channels. 138 * CTC/A or Escon for valid channels.
123 */ 139 */
124 enum channel_types type; 140 enum ctcm_channel_types type;
125 /* 141 /*
126 * Misc. flags. See CHANNEL_FLAGS_... below 142 * Misc. flags. See CHANNEL_FLAGS_... below
127 */ 143 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <asm/idals.h> 54#include <asm/idals.h>
55 55
56#include "cu3088.h"
57#include "ctcm_mpc.h" 56#include "ctcm_mpc.h"
58#include "ctcm_main.h" 57#include "ctcm_main.h"
59#include "ctcm_fsms.h" 58#include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..738ad26c74a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
158 return count; 158 return count;
159} 159}
160 160
161const char *ctcm_type[] = {
162 "not a channel",
163 "CTC/A",
164 "FICON channel",
165 "ESCON channel",
166 "unknown channel type",
167 "unsupported channel type",
168};
169
161static ssize_t ctcm_type_show(struct device *dev, 170static ssize_t ctcm_type_show(struct device *dev,
162 struct device_attribute *attr, char *buf) 171 struct device_attribute *attr, char *buf)
163{ 172{
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
168 return -ENODEV; 177 return -ENODEV;
169 178
170 return sprintf(buf, "%s\n", 179 return sprintf(buf, "%s\n",
171 cu3088_type[cgdev->cdev[0]->id.driver_info]); 180 ctcm_type[cgdev->cdev[0]->id.driver_info]);
172} 181}
173 182
174static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); 183static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * CTC / LCS ccw_device driver
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/err.h>
27
28#include <asm/ccwdev.h>
29#include <asm/ccwgroup.h>
30
31#include "cu3088.h"
32
33const char *cu3088_type[] = {
34 "not a channel",
35 "CTC/A",
36 "ESCON channel",
37 "FICON channel",
38 "OSA LCS card",
39 "CLAW channel device",
40 "unknown channel type",
41 "unsupported channel type",
42};
43
44/* static definitions */
45
46static struct ccw_device_id cu3088_ids[] = {
47 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
48 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
49 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
50 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
51 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
52 { /* end of list */ }
53};
54
55static struct ccw_driver cu3088_driver;
56
57static struct device *cu3088_root_dev;
58
59static ssize_t
60group_write(struct device_driver *drv, const char *buf, size_t count)
61{
62 int ret;
63 struct ccwgroup_driver *cdrv;
64
65 cdrv = to_ccwgroupdrv(drv);
66 if (!cdrv)
67 return -EINVAL;
68 ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
69 &cu3088_driver, 2, buf);
70
71 return (ret == 0) ? count : ret;
72}
73
74static DRIVER_ATTR(group, 0200, NULL, group_write);
75
76/* Register-unregister for ctc&lcs */
77int
78register_cu3088_discipline(struct ccwgroup_driver *dcp)
79{
80 int rc;
81
82 if (!dcp)
83 return -EINVAL;
84
85 /* Register discipline.*/
86 rc = ccwgroup_driver_register(dcp);
87 if (rc)
88 return rc;
89
90 rc = driver_create_file(&dcp->driver, &driver_attr_group);
91 if (rc)
92 ccwgroup_driver_unregister(dcp);
93
94 return rc;
95
96}
97
98void
99unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
100{
101 if (!dcp)
102 return;
103
104 driver_remove_file(&dcp->driver, &driver_attr_group);
105 ccwgroup_driver_unregister(dcp);
106}
107
108static struct ccw_driver cu3088_driver = {
109 .owner = THIS_MODULE,
110 .ids = cu3088_ids,
111 .name = "cu3088",
112 .probe = ccwgroup_probe_ccwdev,
113 .remove = ccwgroup_remove_ccwdev,
114};
115
116/* module setup */
117static int __init
118cu3088_init (void)
119{
120 int rc;
121
122 cu3088_root_dev = root_device_register("cu3088");
123 if (IS_ERR(cu3088_root_dev))
124 return PTR_ERR(cu3088_root_dev);
125 rc = ccw_driver_register(&cu3088_driver);
126 if (rc)
127 root_device_unregister(cu3088_root_dev);
128
129 return rc;
130}
131
132static void __exit
133cu3088_exit (void)
134{
135 ccw_driver_unregister(&cu3088_driver);
136 root_device_unregister(cu3088_root_dev);
137}
138
139MODULE_DEVICE_TABLE(ccw,cu3088_ids);
140MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
141MODULE_LICENSE("GPL");
142
143module_init(cu3088_init);
144module_exit(cu3088_exit);
145
146EXPORT_SYMBOL_GPL(cu3088_type);
147EXPORT_SYMBOL_GPL(register_cu3088_discipline);
148EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14 /* Device is a ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a OSA2 card */
21 channel_type_osa2,
22
23 /* Device is a CLAW channel device */
24 channel_type_claw,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..cae48cbc5e96 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
27 return NULL; 27 return NULL;
28 } 28 }
29 strlcpy(this->name, name, sizeof(this->name)); 29 strlcpy(this->name, name, sizeof(this->name));
30 init_waitqueue_head(&this->wait_q);
30 31
31 f = kzalloc(sizeof(fsm), order); 32 f = kzalloc(sizeof(fsm), order);
32 if (f == NULL) { 33 if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
66 char name[16]; 66 char name[16];
67 void *userdata; 67 void *userdata;
68 int userint; 68 int userint;
69 wait_queue_head_t wait_q;
69#if FSM_DEBUG_HISTORY 70#if FSM_DEBUG_HISTORY
70 int history_index; 71 int history_index;
71 int history_size; 72 int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
197 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, 198 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
198 fi->f->state_names[newstate]); 199 fi->f->state_names[newstate]);
199#endif 200#endif
201 wake_up(&fi->wait_q);
200} 202}
201 203
202/** 204/**
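
The fsm changes supply the other half of the suspend wait added in ctcm_main.c: init_fsm() now initializes a wait queue, and every fsm_newstate() wakes it, so any caller can sleep until an instance reaches a particular state. A toy pairing of the two sides (invented names; state 0 stands in for DEV_STATE_STOPPED):

#include <linux/wait.h>
#include <linux/errno.h>

struct toy_fsm {
	int state;
	wait_queue_head_t wait_q;
};

static void toy_init(struct toy_fsm *fi)
{
	fi->state = -1;
	init_waitqueue_head(&fi->wait_q);	/* mirrors init_fsm() */
}

static void toy_newstate(struct toy_fsm *fi, int newstate)
{
	fi->state = newstate;
	wake_up(&fi->wait_q);			/* mirrors fsm_newstate() */
}

static int toy_wait_for(struct toy_fsm *fi, int state, long timeout)
{
	/* wait_event_timeout() returns 0 only if the timeout expired */
	return wait_event_timeout(fi->wait_q, fi->state == state, timeout)
		? 0 : -EBUSY;
}
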
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a70de9b4bf29..f6cc46dc0501 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
47#include <asm/ccwgroup.h> 47#include <asm/ccwgroup.h>
48 48
49#include "lcs.h" 49#include "lcs.h"
50#include "cu3088.h"
51 50
52 51
53#if !defined(CONFIG_NET_ETHERNET) && \ 52#if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
60 */ 59 */
61 60
62static char version[] __initdata = "LCS driver"; 61static char version[] __initdata = "LCS driver";
63static char debug_buffer[255]; 62
63/**
64 * the root device for lcs group devices
65 */
66static struct device *lcs_root_dev;
64 67
65/** 68/**
66 * Some prototypes. 69 * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
76/** 79/**
77 * Debug Facility Stuff 80 * Debug Facility Stuff
78 */ 81 */
82static char debug_buffer[255];
79static debug_info_t *lcs_dbf_setup; 83static debug_info_t *lcs_dbf_setup;
80static debug_info_t *lcs_dbf_trace; 84static debug_info_t *lcs_dbf_trace;
81 85
@@ -889,7 +893,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
889 rc = lcs_ready_buffer(&card->write, buffer); 893 rc = lcs_ready_buffer(&card->write, buffer);
890 if (rc) 894 if (rc)
891 return rc; 895 return rc;
892 init_timer(&timer); 896 init_timer_on_stack(&timer);
893 timer.function = lcs_lancmd_timeout; 897 timer.function = lcs_lancmd_timeout;
894 timer.data = (unsigned long) reply; 898 timer.data = (unsigned long) reply;
895 timer.expires = jiffies + HZ*card->lancmd_timeout; 899 timer.expires = jiffies + HZ*card->lancmd_timeout;
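
The switch to init_timer_on_stack() matters when CONFIG_DEBUG_OBJECTS_TIMERS is enabled: a timer living on the stack must be announced to the object-debugging code, or it is reported as an unknown object. A sketch of the full on-stack lifecycle under that assumption; the hunk above shows only the init side, and destroy_timer_on_stack() is the matching teardown for this timer API vintage (function/data fields):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_timeout(unsigned long data)
{
	/* timeout action, e.g. fail the reply identified by 'data' */
}

static void demo_run_with_timeout(unsigned long cookie, int seconds)
{
	struct timer_list timer;

	init_timer_on_stack(&timer);	/* not init_timer(): stack object */
	timer.function = demo_timeout;
	timer.data = cookie;
	timer.expires = jiffies + seconds * HZ;
	add_timer(&timer);

	/* ... wait for the command to complete ... */

	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);
}
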
@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1968 1972
1969static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1970 1974
1975const char *lcs_type[] = {
1976 "not a channel",
1977 "2216 parallel",
1978 "2216 channel",
1979 "OSA LCS card",
1980 "unknown channel type",
1981 "unsupported channel type",
1982};
1983
1971static ssize_t 1984static ssize_t
1972lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1985lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1973{ 1986{
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1977 if (!cgdev) 1990 if (!cgdev)
1978 return -ENODEV; 1991 return -ENODEV;
1979 1992
1980 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); 1993 return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1981} 1994}
1982 1995
1983static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); 1996static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2130,8 +2143,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2130 card->write.ccwdev = ccwgdev->cdev[1]; 2143 card->write.ccwdev = ccwgdev->cdev[1];
2131 2144
2132 recover_state = card->state; 2145 recover_state = card->state;
2133 ccw_device_set_online(card->read.ccwdev); 2146 rc = ccw_device_set_online(card->read.ccwdev);
2134 ccw_device_set_online(card->write.ccwdev); 2147 if (rc)
2148 goto out_err;
2149 rc = ccw_device_set_online(card->write.ccwdev);
2150 if (rc)
2151 goto out_werr;
2135 2152
2136 LCS_DBF_TEXT(3, setup, "lcsnewdv"); 2153 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2137 2154
@@ -2210,8 +2227,10 @@ netdev_out:
2210 return 0; 2227 return 0;
2211out: 2228out:
2212 2229
2213 ccw_device_set_offline(card->read.ccwdev);
2214 ccw_device_set_offline(card->write.ccwdev); 2230 ccw_device_set_offline(card->write.ccwdev);
2231out_werr:
2232 ccw_device_set_offline(card->read.ccwdev);
2233out_err:
2215 return -ENODEV; 2234 return -ENODEV;
2216} 2235}
2217 2236
@@ -2364,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
2364 return lcs_pm_resume(card); 2383 return lcs_pm_resume(card);
2365} 2384}
2366 2385
2386static struct ccw_device_id lcs_ids[] = {
2387 {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2388 {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2389 {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2390 {},
2391};
2392MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393
2394static struct ccw_driver lcs_ccw_driver = {
2395 .owner = THIS_MODULE,
2396 .name = "lcs",
2397 .ids = lcs_ids,
2398 .probe = ccwgroup_probe_ccwdev,
2399 .remove = ccwgroup_remove_ccwdev,
2400};
2401
2367/** 2402/**
2368 * LCS ccwgroup driver registration 2403 * LCS ccwgroup driver registration
2369 */ 2404 */
@@ -2383,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
2383 .restore = lcs_restore, 2418 .restore = lcs_restore,
2384}; 2419};
2385 2420
2421static ssize_t
2422lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2423 size_t count)
2424{
2425 int err;
2426 err = ccwgroup_create_from_string(lcs_root_dev,
2427 lcs_group_driver.driver_id,
2428 &lcs_ccw_driver, 2, buf);
2429 return err ? err : count;
2430}
2431
2432static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2433
2434static struct attribute *lcs_group_attrs[] = {
2435 &driver_attr_group.attr,
2436 NULL,
2437};
2438
2439static struct attribute_group lcs_group_attr_group = {
2440 .attrs = lcs_group_attrs,
2441};
2442
2443static const struct attribute_group *lcs_group_attr_groups[] = {
2444 &lcs_group_attr_group,
2445 NULL,
2446};
2447
2386/** 2448/**
2387 * LCS Module/Kernel initialization function 2449 * LCS Module/Kernel initialization function
2388 */ 2450 */
@@ -2394,17 +2456,30 @@ __init lcs_init_module(void)
2394 pr_info("Loading %s\n", version); 2456 pr_info("Loading %s\n", version);
2395 rc = lcs_register_debug_facility(); 2457 rc = lcs_register_debug_facility();
2396 LCS_DBF_TEXT(0, setup, "lcsinit"); 2458 LCS_DBF_TEXT(0, setup, "lcsinit");
2397 if (rc) { 2459 if (rc)
2398 pr_err("Initialization failed\n"); 2460 goto out_err;
2399 return rc; 2461 lcs_root_dev = root_device_register("lcs");
2400 } 2462 rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2401 2463 if (rc)
2402 rc = register_cu3088_discipline(&lcs_group_driver); 2464 goto register_err;
2403 if (rc) { 2465 rc = ccw_driver_register(&lcs_ccw_driver);
2404 pr_err("Initialization failed\n"); 2466 if (rc)
2405 return rc; 2467 goto ccw_err;
2406 } 2468 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2469 rc = ccwgroup_driver_register(&lcs_group_driver);
2470 if (rc)
2471 goto ccwgroup_err;
2407 return 0; 2472 return 0;
2473
2474ccwgroup_err:
2475 ccw_driver_unregister(&lcs_ccw_driver);
2476ccw_err:
2477 root_device_unregister(lcs_root_dev);
2478register_err:
2479 lcs_unregister_debug_facility();
2480out_err:
2481 pr_err("Initializing the lcs device driver failed\n");
2482 return rc;
2408} 2483}
2409 2484
2410 2485
@@ -2416,7 +2491,11 @@ __exit lcs_cleanup_module(void)
2416{ 2491{
2417 pr_info("Terminating lcs module.\n"); 2492 pr_info("Terminating lcs module.\n");
2418 LCS_DBF_TEXT(0, trace, "cleanup"); 2493 LCS_DBF_TEXT(0, trace, "cleanup");
2419 unregister_cu3088_discipline(&lcs_group_driver); 2494 driver_remove_file(&lcs_group_driver.driver,
2495 &driver_attr_group);
2496 ccwgroup_driver_unregister(&lcs_group_driver);
2497 ccw_driver_unregister(&lcs_ccw_driver);
2498 root_device_unregister(lcs_root_dev);
2420 lcs_unregister_debug_facility(); 2499 lcs_unregister_debug_facility();
2421} 2500}
2422 2501
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) dev_get_drvdata( \ 37 (struct lcs_card *) dev_get_drvdata( \
38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39
40/**
41 * Enum for classifying detected devices.
42 */
43enum lcs_channel_types {
44 /* Device is not a channel */
45 lcs_channel_type_none,
46
47 /* Device is a 2216 parallel channel */
48 lcs_channel_type_parallel,
49
50 /* Device is a 2216 channel */
51 lcs_channel_type_2216,
52
53 /* Device is an OSA2 card */
54 lcs_channel_type_osa2
55};
56
39/** 57/**
40 * CCW commands used in this driver 58 * CCW commands used in this driver
41 */ 59 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..65ebee0a3266 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -113,11 +113,9 @@ static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
113#define IUCV_DBF_TEXT_(name, level, text...) \ 113#define IUCV_DBF_TEXT_(name, level, text...) \
114 do { \ 114 do { \
115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ 115 if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
116 char* iucv_dbf_txt_buf = \ 116 char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
117 get_cpu_var(iucv_dbf_txt_buf); \ 117 sprintf(__buf, text); \
118 sprintf(iucv_dbf_txt_buf, text); \ 118 debug_text_event(iucv_dbf_##name, level, __buf); \
119 debug_text_event(iucv_dbf_##name, level, \
120 iucv_dbf_txt_buf); \
121 put_cpu_var(iucv_dbf_txt_buf); \ 119 put_cpu_var(iucv_dbf_txt_buf); \
122 } \ 120 } \
123 } while (0) 121 } while (0)
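
The macro rewrite keeps the same get_cpu_var()/put_cpu_var() bracket and only shortens the local name; the bracket is what makes the shared buffer safe, because preemption stays disabled between the two calls, pinning the task to one CPU's copy. Condensed sketch of the pattern:

#include <linux/percpu.h>
#include <linux/kernel.h>

static DEFINE_PER_CPU(char[256], demo_txt_buf);

static void demo_log(const char *msg)
{
	char *buf = get_cpu_var(demo_txt_buf);	/* disables preemption */

	snprintf(buf, 256, "%s", msg);
	/* hand buf to debug_text_event() or similar here */
	put_cpu_var(demo_txt_buf);		/* re-enables preemption */
}
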
@@ -161,7 +159,7 @@ static void netiucv_pm_complete(struct device *);
161static int netiucv_pm_freeze(struct device *); 159static int netiucv_pm_freeze(struct device *);
162static int netiucv_pm_restore_thaw(struct device *); 160static int netiucv_pm_restore_thaw(struct device *);
163 161
164static struct dev_pm_ops netiucv_pm_ops = { 162static const struct dev_pm_ops netiucv_pm_ops = {
165 .prepare = netiucv_pm_prepare, 163 .prepare = netiucv_pm_prepare,
166 .complete = netiucv_pm_complete, 164 .complete = netiucv_pm_complete,
167 .freeze = netiucv_pm_freeze, 165 .freeze = netiucv_pm_freeze,
@@ -741,13 +739,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
741 if (single_flag) { 739 if (single_flag) {
742 if ((skb = skb_dequeue(&conn->commit_queue))) { 740 if ((skb = skb_dequeue(&conn->commit_queue))) {
743 atomic_dec(&skb->users); 741 atomic_dec(&skb->users);
744 dev_kfree_skb_any(skb);
745 if (privptr) { 742 if (privptr) {
746 privptr->stats.tx_packets++; 743 privptr->stats.tx_packets++;
747 privptr->stats.tx_bytes += 744 privptr->stats.tx_bytes +=
748 (skb->len - NETIUCV_HDRLEN 745 (skb->len - NETIUCV_HDRLEN
749 - NETIUCV_HDRLEN); 746 - NETIUCV_HDRLEN);
750 } 747 }
748 dev_kfree_skb_any(skb);
751 } 749 }
752 } 750 }
753 conn->tx_buff->data = conn->tx_buff->head; 751 conn->tx_buff->data = conn->tx_buff->head;
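
The conn_action_txdone() hunk is a use-after-free fix hiding in a reorder: skb->len feeds the tx_bytes accounting, so the skb may only be handed to dev_kfree_skb_any() after the statistics are updated. The corrected ordering in isolation:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static void demo_account_and_free(struct sk_buff *skb,
				  struct net_device_stats *stats,
				  unsigned int hdr_len)
{
	stats->tx_packets++;
	stats->tx_bytes += skb->len - 2 * hdr_len;	/* read before free */
	dev_kfree_skb_any(skb);				/* last use of skb */
}
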
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 31a2b4e502ce..b232693378cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
122 __u64 outbound_do_qdio_start_time; 122 __u64 outbound_do_qdio_start_time;
123 unsigned int outbound_do_qdio_cnt; 123 unsigned int outbound_do_qdio_cnt;
124 unsigned int outbound_do_qdio_time; 124 unsigned int outbound_do_qdio_time;
125 /* eddp data */
126 unsigned int large_send_bytes; 125 unsigned int large_send_bytes;
127 unsigned int large_send_cnt; 126 unsigned int large_send_cnt;
128 unsigned int sg_skbs_sent; 127 unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
135 unsigned int sg_frags_rx; 134 unsigned int sg_frags_rx;
136 unsigned int sg_alloc_page_rx; 135 unsigned int sg_alloc_page_rx;
137 unsigned int tx_csum; 136 unsigned int tx_csum;
137 unsigned int tx_lin;
138}; 138};
139 139
140/* Routing stuff */ 140/* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
648 enum qeth_large_send_types large_send; 648 enum qeth_large_send_types large_send;
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation;
651}; 652};
652 653
653/* 654/*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 777 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777} 778}
778 779
779struct qeth_eddp_context;
780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
782const char *qeth_get_cardname_short(struct qeth_card *); 782const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); 836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
837int qeth_mdio_read(struct net_device *, int, int); 837int qeth_mdio_read(struct net_device *, int, int);
838int qeth_snmp_command(struct qeth_card *, char __user *); 838int qeth_snmp_command(struct qeth_card *, char __user *);
839int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
840struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); 839struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
841int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, 840int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
842 unsigned long); 841 unsigned long);
@@ -849,13 +848,14 @@ int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
849 struct sk_buff *, struct qeth_hdr *, int, int, int); 848 struct sk_buff *, struct qeth_hdr *, int, int, int);
850int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 849int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
851 struct sk_buff *, struct qeth_hdr *, int); 850 struct sk_buff *, struct qeth_hdr *, int);
852int qeth_core_get_stats_count(struct net_device *); 851int qeth_core_get_sset_count(struct net_device *, int);
853void qeth_core_get_ethtool_stats(struct net_device *, 852void qeth_core_get_ethtool_stats(struct net_device *,
854 struct ethtool_stats *, u64 *); 853 struct ethtool_stats *, u64 *);
855void qeth_core_get_strings(struct net_device *, u32, u8 *); 854void qeth_core_get_strings(struct net_device *, u32, u8 *);
856void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 855void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
857void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); 856void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
858int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 857int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
858int qeth_set_access_ctrl_online(struct qeth_card *card);
859 859
860/* exports for OSN */ 860/* exports for OSN */
861int qeth_osn_assist(struct net_device *, void *, int); 861int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c4a42d970158..d34804d5ece1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272 272
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307
308static int qeth_issue_next_read(struct qeth_card *card) 273static int qeth_issue_next_read(struct qeth_card *card)
309{ 274{
310 int rc; 275 int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1044 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1045 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1046 card->options.rx_sg_cb = QETH_RX_SG_CB;
1047 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1048}
1083 1049
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1050static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3355}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3356EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3357
3358static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3359 struct qeth_reply *reply, unsigned long data)
3360{
3361 struct qeth_ipa_cmd *cmd;
3362 struct qeth_set_access_ctrl *access_ctrl_req;
3363 int rc;
3364
3365 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3366
3367 cmd = (struct qeth_ipa_cmd *) data;
3368 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3369 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3370 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3371 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3372 cmd->data.setadapterparms.hdr.return_code);
3373 switch (cmd->data.setadapterparms.hdr.return_code) {
3374 case SET_ACCESS_CTRL_RC_SUCCESS:
3375 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3376 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3377 {
3378 card->options.isolation = access_ctrl_req->subcmd_code;
3379 if (card->options.isolation == ISOLATION_MODE_NONE) {
3380 dev_info(&card->gdev->dev,
3381 "QDIO data connection isolation is deactivated\n");
3382 } else {
3383 dev_info(&card->gdev->dev,
3384 "QDIO data connection isolation is activated\n");
3385 }
3386 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3387 card->gdev->dev.kobj.name,
3388 access_ctrl_req->subcmd_code,
3389 cmd->data.setadapterparms.hdr.return_code);
3390 rc = 0;
3391 break;
3392 }
3393 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3394 {
3395 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3396 card->gdev->dev.kobj.name,
3397 access_ctrl_req->subcmd_code,
3398 cmd->data.setadapterparms.hdr.return_code);
3399 dev_err(&card->gdev->dev, "Adapter does not "
3400 "support QDIO data connection isolation\n");
3401
3402 /* ensure isolation mode is "none" */
3403 card->options.isolation = ISOLATION_MODE_NONE;
3404 rc = -EOPNOTSUPP;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev,
3414 "Adapter is dedicated. "
3415 "QDIO data connection isolation not supported\n");
3416
3417 /* ensure isolation mode is "none" */
3418 card->options.isolation = ISOLATION_MODE_NONE;
3419 rc = -EOPNOTSUPP;
3420 break;
3421 }
3422 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3423 {
3424 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3425 card->gdev->dev.kobj.name,
3426 access_ctrl_req->subcmd_code,
3427 cmd->data.setadapterparms.hdr.return_code);
3428 dev_err(&card->gdev->dev,
3429 "TSO does not permit QDIO data connection isolation\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EPERM;
3434 break;
3435 }
3436 default:
3437 {
3438 /* this should never happen */
3439 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3440 "==UNKNOWN\n",
3441 card->gdev->dev.kobj.name,
3442 access_ctrl_req->subcmd_code,
3443 cmd->data.setadapterparms.hdr.return_code);
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = 0;
3448 break;
3449 }
3450 }
3451 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3452 return rc;
3453}
3454
3455static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3456 enum qeth_ipa_isolation_modes isolation)
3457{
3458 int rc;
3459 struct qeth_cmd_buffer *iob;
3460 struct qeth_ipa_cmd *cmd;
3461 struct qeth_set_access_ctrl *access_ctrl_req;
3462
3463 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3464
3465 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3466 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3467
3468 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3469 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3470 sizeof(struct qeth_set_access_ctrl));
3471 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3473 access_ctrl_req->subcmd_code = isolation;
3474
3475 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3476 NULL);
3477 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3478 return rc;
3479}
3480
3481int qeth_set_access_ctrl_online(struct qeth_card *card)
3482{
3483 int rc = 0;
3484
3485 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3486
3487 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3488 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3489 rc = qeth_setadpparms_set_access_ctrl(card,
3490 card->options.isolation);
3491 if (rc) {
3492 QETH_DBF_MESSAGE(3,
3493 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3494 card->gdev->dev.kobj.name,
3495 rc);
3496 }
3497 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3498 card->options.isolation = ISOLATION_MODE_NONE;
3499
3500 dev_err(&card->gdev->dev, "Adapter does not "
3501 "support QDIO data connection isolation\n");
3502 rc = -EOPNOTSUPP;
3503 }
3504 return rc;
3505}
3506EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3507
3392void qeth_tx_timeout(struct net_device *dev) 3508void qeth_tx_timeout(struct net_device *dev)
3393{ 3509{
3394 struct qeth_card *card; 3510 struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3848int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3849{
3734 struct qdio_ssqd_desc *ssqd; 3850 struct qdio_ssqd_desc *ssqd;
3735 int retries = 3; 3851 int retries = 0;
3736 int mpno = 0; 3852 int mpno = 0;
3737 int rc; 3853 int rc;
3738 3854
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3856 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3857retry:
3742 if (retries < 3) { 3858 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3859 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3860 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3861 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3862 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3863 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3864 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3865 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3866 goto retriable;
3751 } 3867 rc = ccw_device_set_online(CARD_WDEV(card));
3868 if (rc)
3869 goto retriable;
3870 rc = ccw_device_set_online(CARD_DDEV(card));
3871 if (rc)
3872 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3873 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3874retriable:
3753 if (rc == -ERESTARTSYS) { 3875 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3876 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3877 return rc;
3756 } else if (rc) { 3878 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3880 if (++retries > 3)
3759 goto out; 3881 goto out;
3760 else 3882 else
3761 goto retry; 3883 goto retry;
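
Two things change in qeth_core_hardsetup_card(): the counter now counts up from zero, so it doubles as the have-we-retried flag for the diagnostic message, and each ccw_device_set_online() result is checked and funneled through the same retriable: gate as the QDIO clear. The control shape, reduced to a runnable toy:

#include <stdio.h>

/* toy stand-in: fail the first two attempts, then succeed */
static int demo_bring_up(int attempt)
{
	return attempt < 2 ? -5 /* -EIO */ : 0;
}

static int demo_hardsetup(void)
{
	int retries = 0;
	int rc;

retry:
	if (retries)
		printf("retrying (%d)\n", retries);
	rc = demo_bring_up(retries);
	if (rc) {
		if (++retries > 3)
			return rc;	/* give up after three retries */
		goto retry;
	}
	return 0;
}

int main(void)
{
	return demo_hardsetup();
}
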
@@ -4303,13 +4425,19 @@ static struct {
4303 {"tx do_QDIO time"}, 4425 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4426 {"tx do_QDIO count"},
4305 {"tx csum"}, 4427 {"tx csum"},
4428 {"tx lin"},
4306}; 4429};
4307 4430
4308int qeth_core_get_stats_count(struct net_device *dev) 4431int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4309{ 4432{
4310 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); 4433 switch (stringset) {
4434 case ETH_SS_STATS:
4435 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4436 default:
4437 return -EINVAL;
4438 }
4311} 4439}
4312EXPORT_SYMBOL_GPL(qeth_core_get_stats_count); 4440EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
4313 4441
4314void qeth_core_get_ethtool_stats(struct net_device *dev, 4442void qeth_core_get_ethtool_stats(struct net_device *dev,
4315 struct ethtool_stats *stats, u64 *data) 4443 struct ethtool_stats *stats, u64 *data)
@@ -4355,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
4355 data[31] = card->perf_stats.outbound_do_qdio_time; 4483 data[31] = card->perf_stats.outbound_do_qdio_time;
4356 data[32] = card->perf_stats.outbound_do_qdio_cnt; 4484 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4357 data[33] = card->perf_stats.tx_csum; 4485 data[33] = card->perf_stats.tx_csum;
4486 data[34] = card->perf_stats.tx_lin;
4358} 4487}
4359EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 4488EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4360 4489
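
The ethtool hunks track a core API change: the old get_stats_count() hook reported one implicit string set, while get_sset_count() receives a set id and must return a count for the sets it implements and an errno for everything else. A minimal sketch of an ops table using the new hook (count and names illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

static int demo_get_sset_count(struct net_device *dev, int stringset)
{
	switch (stringset) {
	case ETH_SS_STATS:
		return 35;		/* one entry per exported counter */
	default:
		return -EINVAL;		/* e.g. ETH_SS_TEST: not supported */
	}
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_sset_count = demo_get_sset_count,
};
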
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..1ba51152f667 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
234 234
235/* SETADAPTER IPA Command: ****************************************************/ 235/* SETADAPTER IPA Command: ****************************************************/
236enum qeth_ipa_setadp_cmd { 236enum qeth_ipa_setadp_cmd {
237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, 237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, 238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, 239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, 240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
241 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, 241 IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
242 IPA_SETADP_SET_CONFIG_PARMS = 0x0020, 242 IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, 243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
244 IPA_SETADP_SET_BROADCAST_MODE = 0x0080, 244 IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
245 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 245 IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
246 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x0800, 248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
249}; 250};
250enum qeth_ipa_mac_ops { 251enum qeth_ipa_mac_ops {
251 CHANGE_ADDR_READ_MAC = 0, 252 CHANGE_ADDR_READ_MAC = 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
264 SET_PROMISC_MODE_OFF = 0, 265 SET_PROMISC_MODE_OFF = 0,
265 SET_PROMISC_MODE_ON = 1, 266 SET_PROMISC_MODE_ON = 1,
266}; 267};
268enum qeth_ipa_isolation_modes {
269 ISOLATION_MODE_NONE = 0x00000000L,
270 ISOLATION_MODE_FWD = 0x00000001L,
271 ISOLATION_MODE_DROP = 0x00000002L,
272};
273enum qeth_ipa_set_access_mode_rc {
274 SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
275 SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
276 SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
277 SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
278 SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
279 SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
280};
281
267 282
268/* (SET)DELIP(M) IPA stuff ***************************************************/ 283/* (SET)DELIP(M) IPA stuff ***************************************************/
269struct qeth_ipacmd_setdelip4 { 284struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
376 struct qeth_snmp_cmd cmd; 391 struct qeth_snmp_cmd cmd;
377} __attribute__((packed)); 392} __attribute__((packed));
378 393
394/* SET_ACCESS_CONTROL: same format for request and reply */
395struct qeth_set_access_ctrl {
396 __u32 subcmd_code;
397} __attribute__((packed));
398
379struct qeth_ipacmd_setadpparms_hdr { 399struct qeth_ipacmd_setadpparms_hdr {
380 __u32 supp_hw_cmds; 400 __u32 supp_hw_cmds;
381 __u32 reserved1; 401 __u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
394 struct qeth_query_cmds_supp query_cmds_supp; 414 struct qeth_query_cmds_supp query_cmds_supp;
395 struct qeth_change_addr change_addr; 415 struct qeth_change_addr change_addr;
396 struct qeth_snmp_cmd snmp; 416 struct qeth_snmp_cmd snmp;
417 struct qeth_set_access_ctrl set_access_ctrl;
397 __u32 mode; 418 __u32 mode;
398 } data; 419 } data;
399} __attribute__ ((packed)); 420} __attribute__ ((packed));
@@ -507,7 +528,7 @@ extern unsigned char ULP_ENABLE[];
507 (PDU_ENCAPSULATION(buffer) + 0x17) 528 (PDU_ENCAPSULATION(buffer) + 0x17)
508#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ 529#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
509 (PDU_ENCAPSULATION(buffer) + 0x2b) 530 (PDU_ENCAPSULATION(buffer) + 0x2b)
510/* Layer 2 defintions */ 531/* Layer 2 definitions */
511#define QETH_PROT_LAYER2 0x08 532#define QETH_PROT_LAYER2 0x08
512#define QETH_PROT_TCPIP 0x03 533#define QETH_PROT_TCPIP 0x03
513#define QETH_PROT_OSN2 0x0a 534#define QETH_PROT_OSN2 0x0a
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..9ff2b36fdc43 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, 416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
417 qeth_dev_layer2_store); 417 qeth_dev_layer2_store);
418 418
419static ssize_t qeth_dev_large_send_show(struct device *dev, 419#define ATTR_QETH_ISOLATION_NONE ("none")
420#define ATTR_QETH_ISOLATION_FWD ("forward")
421#define ATTR_QETH_ISOLATION_DROP ("drop")
422
423static ssize_t qeth_dev_isolation_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 424 struct device_attribute *attr, char *buf)
421{ 425{
422 struct qeth_card *card = dev_get_drvdata(dev); 426 struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
424 if (!card) 428 if (!card)
425 return -EINVAL; 429 return -EINVAL;
426 430
427 switch (card->options.large_send) { 431 switch (card->options.isolation) {
428 case QETH_LARGE_SEND_NO: 432 case ISOLATION_MODE_NONE:
429 return sprintf(buf, "%s\n", "no"); 433 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
430 case QETH_LARGE_SEND_TSO: 434 case ISOLATION_MODE_FWD:
431 return sprintf(buf, "%s\n", "TSO"); 435 return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
436 case ISOLATION_MODE_DROP:
437 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
432 default: 438 default:
433 return sprintf(buf, "%s\n", "N/A"); 439 return snprintf(buf, 5, "%s\n", "N/A");
434 } 440 }
435} 441}
436 442
437static ssize_t qeth_dev_large_send_store(struct device *dev, 443static ssize_t qeth_dev_isolation_store(struct device *dev,
438 struct device_attribute *attr, const char *buf, size_t count) 444 struct device_attribute *attr, const char *buf, size_t count)
439{ 445{
440 struct qeth_card *card = dev_get_drvdata(dev); 446 struct qeth_card *card = dev_get_drvdata(dev);
441 enum qeth_large_send_types type; 447 enum qeth_ipa_isolation_modes isolation;
442 int rc = 0; 448 int rc = 0;
443 char *tmp; 449 char *tmp, *curtoken;
450 curtoken = (char *) buf;
444 451
445 if (!card) 452 if (!card) {
446 return -EINVAL; 453 rc = -EINVAL;
447 tmp = strsep((char **) &buf, "\n"); 454 goto out;
448 if (!strcmp(tmp, "no")) { 455 }
449 type = QETH_LARGE_SEND_NO; 456
450 } else if (!strcmp(tmp, "TSO")) { 457 /* check for unknown, too, in case we do not yet know who we are */
451 type = QETH_LARGE_SEND_TSO; 458 if (card->info.type != QETH_CARD_TYPE_OSAE &&
459 card->info.type != QETH_CARD_TYPE_UNKNOWN) {
460 rc = -EOPNOTSUPP;
461 dev_err(&card->gdev->dev, "Adapter does not "
462 "support QDIO data connection isolation\n");
463 goto out;
464 }
465
466 /* parse input into isolation mode */
467 tmp = strsep(&curtoken, "\n");
468 if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
469 isolation = ISOLATION_MODE_NONE;
470 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
471 isolation = ISOLATION_MODE_FWD;
472 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
473 isolation = ISOLATION_MODE_DROP;
452 } else { 474 } else {
453 return -EINVAL; 475 rc = -EINVAL;
476 goto out;
454 } 477 }
455 if (card->options.large_send == type) 478 rc = count;
456 return count; 479
457 rc = qeth_set_large_send(card, type); 480 /* defer IP assist if device is offline (until discipline->set_online)*/
458 if (rc) 481 card->options.isolation = isolation;
459 return rc; 482 if (card->state == CARD_STATE_SOFTSETUP ||
460 return count; 483 card->state == CARD_STATE_UP) {
484 int ipa_rc = qeth_set_access_ctrl_online(card);
485 if (ipa_rc != 0)
486 rc = ipa_rc;
487 }
488out:
489 return rc;
461} 490}
462 491
463static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, 492static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
464 qeth_dev_large_send_store); 493 qeth_dev_isolation_store);
465 494
466static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 495static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
467{ 496{
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
582 &dev_attr_recover.attr, 611 &dev_attr_recover.attr,
583 &dev_attr_performance_stats.attr, 612 &dev_attr_performance_stats.attr,
584 &dev_attr_layer2.attr, 613 &dev_attr_layer2.attr,
585 &dev_attr_large_send.attr, 614 &dev_attr_isolation.attr,
586 NULL, 615 NULL,
587}; 616};
588 617
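
qeth_dev_isolation_store() peels one newline-terminated token off the sysfs buffer with strsep() and matches it against the three keywords; anything else is -EINVAL, and when the card is offline the IPA command is deferred to set_online. The parse step in isolation, as a hypothetical helper (strsep() writes a NUL into the buffer, which is legitimate for a sysfs store buffer):

#include <linux/string.h>
#include <linux/errno.h>

/* returns the isolation mode for a sysfs token, or -EINVAL */
static int demo_parse_isolation(const char *buf)
{
	char *curtoken = (char *) buf;
	char *tmp = strsep(&curtoken, "\n");	/* strip trailing newline */

	if (!strcmp(tmp, "none"))
		return 0;	/* ISOLATION_MODE_NONE */
	if (!strcmp(tmp, "forward"))
		return 1;	/* ISOLATION_MODE_FWD */
	if (!strcmp(tmp, "drop"))
		return 2;	/* ISOLATION_MODE_DROP */
	return -EINVAL;
}
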
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f4f3ca1393b2..0b763396d5d1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -866,7 +866,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
866 .get_link = ethtool_op_get_link, 866 .get_link = ethtool_op_get_link,
867 .get_strings = qeth_core_get_strings, 867 .get_strings = qeth_core_get_strings,
868 .get_ethtool_stats = qeth_core_get_ethtool_stats, 868 .get_ethtool_stats = qeth_core_get_ethtool_stats,
869 .get_stats_count = qeth_core_get_stats_count, 869 .get_sset_count = qeth_core_get_sset_count,
870 .get_drvinfo = qeth_core_get_drvinfo, 870 .get_drvinfo = qeth_core_get_drvinfo,
871 .get_settings = qeth_core_ethtool_get_settings, 871 .get_settings = qeth_core_ethtool_get_settings,
872}; 872};
@@ -874,7 +874,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
874static const struct ethtool_ops qeth_l2_osn_ops = { 874static const struct ethtool_ops qeth_l2_osn_ops = {
875 .get_strings = qeth_core_get_strings, 875 .get_strings = qeth_core_get_strings,
876 .get_ethtool_stats = qeth_core_get_ethtool_stats, 876 .get_ethtool_stats = qeth_core_get_ethtool_stats,
877 .get_stats_count = qeth_core_get_stats_count, 877 .get_sset_count = qeth_core_get_sset_count,
878 .get_drvinfo = qeth_core_get_drvinfo, 878 .get_drvinfo = qeth_core_get_drvinfo,
879}; 879};
880 880
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
940 940
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 942 recover_flag = card->state;
943 rc = ccw_device_set_online(CARD_RDEV(card));
944 if (rc) {
945 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
946 return -EIO;
947 }
948 rc = ccw_device_set_online(CARD_WDEV(card));
949 if (rc) {
950 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
951 return -EIO;
952 }
953 rc = ccw_device_set_online(CARD_DDEV(card));
954 if (rc) {
955 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
956 return -EIO;
957 }
958
959 rc = qeth_core_hardsetup_card(card); 943 rc = qeth_core_hardsetup_card(card);
960 if (rc) { 944 if (rc) {
961 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 945 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
946 rc = -ENODEV;
962 goto out_remove; 947 goto out_remove;
963 } 948 }
964 949
965 if (!card->dev && qeth_l2_setup_netdev(card)) 950 if (!card->dev && qeth_l2_setup_netdev(card)) {
951 rc = -ENODEV;
966 goto out_remove; 952 goto out_remove;
953 }
967 954
968 if (card->info.type != QETH_CARD_TYPE_OSN) 955 if (card->info.type != QETH_CARD_TYPE_OSN)
969 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 956 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
983 card->lan_online = 0; 970 card->lan_online = 0;
984 return 0; 971 return 0;
985 } 972 }
973 rc = -ENODEV;
986 goto out_remove; 974 goto out_remove;
987 } else 975 } else
988 card->lan_online = 1; 976 card->lan_online = 1;
989 977
990 if (card->info.type != QETH_CARD_TYPE_OSN) { 978 if (card->info.type != QETH_CARD_TYPE_OSN) {
991 qeth_set_large_send(card, card->options.large_send); 979 /* configure isolation level */
980 qeth_set_access_ctrl_online(card);
992 qeth_l2_process_vlans(card, 0); 981 qeth_l2_process_vlans(card, 0);
993 } 982 }
994 983
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
997 rc = qeth_init_qdio_queues(card); 986 rc = qeth_init_qdio_queues(card);
998 if (rc) { 987 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 988 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
989 rc = -ENODEV;
1000 goto out_remove; 990 goto out_remove;
1001 } 991 }
1002 card->state = CARD_STATE_SOFTSETUP; 992 card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1018 /* let user_space know that device is online */ 1008 /* let user_space know that device is online */
1019 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1009 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1020 return 0; 1010 return 0;
1011
1021out_remove: 1012out_remove:
1022 card->use_hard_stop = 1; 1013 card->use_hard_stop = 1;
1023 qeth_l2_stop_card(card, 0); 1014 qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
1028 card->state = CARD_STATE_RECOVER; 1019 card->state = CARD_STATE_RECOVER;
1029 else 1020 else
1030 card->state = CARD_STATE_DOWN; 1021 card->state = CARD_STATE_DOWN;
1031 return -ENODEV; 1022 return rc;
1032} 1023}
1033 1024
1034static int qeth_l2_set_online(struct ccwgroup_device *gdev) 1025static int qeth_l2_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..321988fa9f7d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
62 const u8 *); 62 const u8 *);
63int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
64int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
63 65
64#endif /* __QETH_L3_H__ */ 66#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 073b6d354915..fd1b6ed3721f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41static int __qeth_l3_set_online(struct ccwgroup_device *, int); 41static int __qeth_l3_set_online(struct ccwgroup_device *, int);
42static int __qeth_l3_set_offline(struct ccwgroup_device *, int); 42static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
43 43
44int qeth_l3_set_large_send(struct qeth_card *card,
45 enum qeth_large_send_types type)
46{
47 int rc = 0;
48
49 card->options.large_send = type;
50 if (card->dev == NULL)
51 return 0;
52
53 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
54 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
55 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
56 NETIF_F_HW_CSUM;
57 } else {
58 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
59 NETIF_F_HW_CSUM);
60 card->options.large_send = QETH_LARGE_SEND_NO;
61 rc = -EOPNOTSUPP;
62 }
63 } else {
64 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
65 NETIF_F_HW_CSUM);
66 card->options.large_send = QETH_LARGE_SEND_NO;
67 }
68 return rc;
69}
44 70
45static int qeth_l3_isxdigit(char *buf) 71static int qeth_l3_isxdigit(char *buf)
46{ 72{
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1439 return 0; 1465 return 0;
1440} 1466}
1441 1467
1468int qeth_l3_set_rx_csum(struct qeth_card *card,
1469 enum qeth_checksum_types csum_type)
1470{
1471 int rc = 0;
1472
1473 if (card->options.checksum_type == HW_CHECKSUMMING) {
1474 if ((csum_type != HW_CHECKSUMMING) &&
1475 (card->state != CARD_STATE_DOWN)) {
1476 rc = qeth_l3_send_simple_setassparms(card,
1477 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1478 if (rc)
1479 return -EIO;
1480 }
1481 } else {
1482 if (csum_type == HW_CHECKSUMMING) {
1483 if (card->state != CARD_STATE_DOWN) {
1484 if (!qeth_is_supported(card,
1485 IPA_INBOUND_CHECKSUM))
1486 return -EPERM;
1487 rc = qeth_l3_send_checksum_command(card);
1488 if (rc)
1489 return -EIO;
1490 }
1491 }
1492 }
1493 card->options.checksum_type = csum_type;
1494 return rc;
1495}
1496
1442static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1497static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1443{ 1498{
1444 int rc = 0; 1499 int rc = 0;
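
Note: qeth_l3_set_rx_csum only needs to talk to the card on transitions into or out of hardware checksumming, and only while the card is not DOWN; a DOWN card just records the option. A compact model of those rules, where the ok flags stand in for the IPA command results and the capability check (a sketch, not the driver code):

#include <errno.h>
#include <stdio.h>

enum csum { NO_CSUM, SW_CSUM, HW_CSUM };

static int set_rx_csum(enum csum *cur, enum csum want, int card_down,
                       int hw_supported, int cmd_ok)
{
        if (*cur == HW_CSUM && want != HW_CSUM && !card_down) {
                if (!cmd_ok)
                        return -EIO;    /* ASS_STOP command failed */
        } else if (*cur != HW_CSUM && want == HW_CSUM && !card_down) {
                if (!hw_supported)
                        return -EPERM;  /* no inbound-checksum assist */
                if (!cmd_ok)
                        return -EIO;    /* start command failed */
        }
        *cur = want;                    /* DOWN cards only record it */
        return 0;
}

int main(void)
{
        enum csum cur = SW_CSUM;
        int rc = set_rx_csum(&cur, HW_CSUM, 0, 1, 1);

        printf("rc=%d cur=%d\n", rc, cur);
        return 0;
}
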
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1506static int qeth_l3_start_ipassists(struct qeth_card *card) 1561static int qeth_l3_start_ipassists(struct qeth_card *card)
1507{ 1562{
1508 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1563 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1564
1565 qeth_set_access_ctrl_online(card); /* go on*/
1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1566 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1567 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1511 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1568 qeth_l3_start_ipa_source_mac(card); /* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
2684 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2741 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2685} 2742}
2686 2743
2744static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2745{
2746 unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
2747 tcp_hdr(skb)->doff * 4;
2748 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2749 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
2750 elements += skb_shinfo(skb)->nr_frags;
2751 return elements;
2752}
2753
2754static inline int qeth_l3_tso_check(struct sk_buff *skb)
2755{
2756 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
2757 (unsigned long)skb->data;
2758 return (((unsigned long)skb->data & PAGE_MASK) !=
2759 (((unsigned long)skb->data + len) & PAGE_MASK));
2760}
2761
2687static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2762static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2688{ 2763{
2689 int rc; 2764 int rc;
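
Note: qeth_l3_tso_elements counts the buffer elements needed for the linear TCP payload as the number of pages it touches, PFN_UP(end) - PFN_DOWN(start), plus one element per page fragment; qeth_l3_tso_check flags skbs whose headers cross a page boundary. A stand-alone worked example of the page arithmetic, assuming the 4 KiB pages used on s390:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
        /* payload starts near the end of page 1 and ends in page 3 */
        unsigned long start = 0x1ff0, len = 0x1020;

        /* prints 3: pages 0x1000, 0x2000 and 0x3000 are all touched */
        printf("%lu elements\n", PFN_UP(start + len) - PFN_DOWN(start));
        return 0;
}
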
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2777 /* fix hardware limitation: as long as we do not have sbal 2852 /* fix hardware limitation: as long as we do not have sbal
2778 * chaining we can not send long frag lists 2853 * chaining we can not send long frag lists
2779 */ 2854 */
2780 if ((large_send == QETH_LARGE_SEND_TSO) && 2855 if (large_send == QETH_LARGE_SEND_TSO) {
2781 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2856 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
2782 if (skb_linearize(new_skb)) 2857 if (skb_linearize(new_skb))
2783 goto tx_drop; 2858 goto tx_drop;
2859 if (card->options.performance_stats)
2860 card->perf_stats.tx_lin++;
2861 }
2784 } 2862 }
2785 2863
2786 if ((large_send == QETH_LARGE_SEND_TSO) && 2864 if ((large_send == QETH_LARGE_SEND_TSO) &&
2787 (cast_type == RTN_UNSPEC)) { 2865 (cast_type == RTN_UNSPEC)) {
2788 hdr = (struct qeth_hdr *)skb_push(new_skb, 2866 hdr = (struct qeth_hdr *)skb_push(new_skb,
2789 sizeof(struct qeth_hdr_tso)); 2867 sizeof(struct qeth_hdr_tso));
2868 if (qeth_l3_tso_check(new_skb))
2869 QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
2790 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2870 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2791 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2871 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2792 qeth_tso_fill_header(card, hdr, new_skb); 2872 qeth_tso_fill_header(card, hdr, new_skb);
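
Note: the transmit hunk above swaps the old rough estimate (nr_frags + 2) for the exact element count and linearizes any skb that would not fit, counting the event in the new tx_lin statistic. A small model of the guard; the 16-element limit is taken from the hunk, the +1 presumably covers the TSO header element, and the rest is invented:

#include <stdio.h>

enum { TX_OK, TX_DROP };

static int tso_fit_or_linearize(int elements, int linearize_ok, int *tx_lin)
{
        if (elements + 1 > 16) {        /* +1: presumably the TSO header */
                if (!linearize_ok)
                        return TX_DROP; /* tx_drop path in the driver */
                (*tx_lin)++;            /* performance counter */
        }
        return TX_OK;
}

int main(void)
{
        int tx_lin = 0;
        int rc = tso_fit_or_linearize(17, 1, &tx_lin);

        printf("rc=%d tx_lin=%d\n", rc, tx_lin);
        return 0;
}

The gso_max_size = 15 * PAGE_SIZE cap added later in this diff reads like the matching bound: with one of the 16 elements reserved for the header, 15 page-sized elements remain for payload.
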
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2903static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2983static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2904{ 2984{
2905 struct qeth_card *card = dev->ml_priv; 2985 struct qeth_card *card = dev->ml_priv;
2906 enum qeth_card_states old_state;
2907 enum qeth_checksum_types csum_type; 2986 enum qeth_checksum_types csum_type;
2908 2987
2909 if ((card->state != CARD_STATE_UP) &&
2910 (card->state != CARD_STATE_DOWN))
2911 return -EPERM;
2912
2913 if (data) 2988 if (data)
2914 csum_type = HW_CHECKSUMMING; 2989 csum_type = HW_CHECKSUMMING;
2915 else 2990 else
2916 csum_type = SW_CHECKSUMMING; 2991 csum_type = SW_CHECKSUMMING;
2917 2992
2918 if (card->options.checksum_type != csum_type) { 2993 return qeth_l3_set_rx_csum(card, csum_type);
2919 old_state = card->state;
2920 if (card->state == CARD_STATE_UP)
2921 __qeth_l3_set_offline(card->gdev, 1);
2922 card->options.checksum_type = csum_type;
2923 if (old_state == CARD_STATE_UP)
2924 __qeth_l3_set_online(card->gdev, 1);
2925 }
2926 return 0;
2927} 2994}
2928 2995
2929static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2996static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2930{ 2997{
2931 struct qeth_card *card = dev->ml_priv; 2998 struct qeth_card *card = dev->ml_priv;
2999 int rc = 0;
2932 3000
2933 if (data) { 3001 if (data) {
2934 if (card->options.large_send == QETH_LARGE_SEND_NO) { 3002 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
2935 if (card->info.type == QETH_CARD_TYPE_IQD)
2936 return -EPERM;
2937 else
2938 card->options.large_send = QETH_LARGE_SEND_TSO;
2939 dev->features |= NETIF_F_TSO;
2940 }
2941 } else { 3003 } else {
2942 dev->features &= ~NETIF_F_TSO; 3004 dev->features &= ~NETIF_F_TSO;
2943 card->options.large_send = QETH_LARGE_SEND_NO; 3005 card->options.large_send = QETH_LARGE_SEND_NO;
2944 } 3006 }
2945 return 0; 3007 return rc;
2946} 3008}
2947 3009
2948static const struct ethtool_ops qeth_l3_ethtool_ops = { 3010static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -2957,7 +3019,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
2957 .set_tso = qeth_l3_ethtool_set_tso, 3019 .set_tso = qeth_l3_ethtool_set_tso,
2958 .get_strings = qeth_core_get_strings, 3020 .get_strings = qeth_core_get_strings,
2959 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3021 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2960 .get_stats_count = qeth_core_get_stats_count, 3022 .get_sset_count = qeth_core_get_sset_count,
2961 .get_drvinfo = qeth_core_get_drvinfo, 3023 .get_drvinfo = qeth_core_get_drvinfo,
2962 .get_settings = qeth_core_ethtool_get_settings, 3024 .get_settings = qeth_core_ethtool_get_settings,
2963}; 3025};
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3058 NETIF_F_HW_VLAN_RX | 3120 NETIF_F_HW_VLAN_RX |
3059 NETIF_F_HW_VLAN_FILTER; 3121 NETIF_F_HW_VLAN_FILTER;
3060 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3122 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3123 card->dev->gso_max_size = 15 * PAGE_SIZE;
3061 3124
3062 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3125 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3063 return register_netdev(card->dev); 3126 return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3154 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3155 3218
3156 recover_flag = card->state; 3219 recover_flag = card->state;
3157 rc = ccw_device_set_online(CARD_RDEV(card));
3158 if (rc) {
3159 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3160 return -EIO;
3161 }
3162 rc = ccw_device_set_online(CARD_WDEV(card));
3163 if (rc) {
3164 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3165 return -EIO;
3166 }
3167 rc = ccw_device_set_online(CARD_DDEV(card));
3168 if (rc) {
3169 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3170 return -EIO;
3171 }
3172
3173 rc = qeth_core_hardsetup_card(card); 3220 rc = qeth_core_hardsetup_card(card);
3174 if (rc) { 3221 if (rc) {
3175 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3222 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3223 rc = -ENODEV;
3176 goto out_remove; 3224 goto out_remove;
3177 } 3225 }
3178 3226
3179 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3227 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3180 3228
3181 if (!card->dev && qeth_l3_setup_netdev(card)) 3229 if (!card->dev && qeth_l3_setup_netdev(card)) {
3230 rc = -ENODEV;
3182 goto out_remove; 3231 goto out_remove;
3232 }
3183 3233
3184 card->state = CARD_STATE_HARDSETUP; 3234 card->state = CARD_STATE_HARDSETUP;
3185 qeth_print_status_message(card); 3235 qeth_print_status_message(card);
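
Note: the three per-channel ccw_device_set_online calls disappear from this function; given the large qeth_core_main.c changes in this series, they have presumably been folded into qeth_core_hardsetup_card, leaving a single failure point here. A sketch of the simplified flow (function bodies invented):

#include <errno.h>
#include <stdio.h>

static int hardsetup_card(int ok)
{
        /* would bring read/write/data channels online and set up the card */
        return ok ? 0 : -EIO;
}

static int set_online(int ok)
{
        int rc = hardsetup_card(ok);

        if (rc) {
                fprintf(stderr, "2err%d\n", rc);  /* SETUP trace above */
                return -ENODEV;  /* translated for the device layer */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", set_online(0));
        return 0;
}
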
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3196 card->lan_online = 0; 3246 card->lan_online = 0;
3197 return 0; 3247 return 0;
3198 } 3248 }
3249 rc = -ENODEV;
3199 goto out_remove; 3250 goto out_remove;
3200 } else 3251 } else
3201 card->lan_online = 1; 3252 card->lan_online = 1;
3202 qeth_set_large_send(card, card->options.large_send); 3253 qeth_l3_set_large_send(card, card->options.large_send);
3203 3254
3204 rc = qeth_l3_setadapter_parms(card); 3255 rc = qeth_l3_setadapter_parms(card);
3205 if (rc) 3256 if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3218 rc = qeth_init_qdio_queues(card); 3269 rc = qeth_init_qdio_queues(card);
3219 if (rc) { 3270 if (rc) {
3220 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3272 rc = -ENODEV;
3221 goto out_remove; 3273 goto out_remove;
3222 } 3274 }
3223 card->state = CARD_STATE_SOFTSETUP; 3275 card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
3248 card->state = CARD_STATE_RECOVER; 3300 card->state = CARD_STATE_RECOVER;
3249 else 3301 else
3250 card->state = CARD_STATE_DOWN; 3302 card->state = CARD_STATE_DOWN;
3251 return -ENODEV; 3303 return rc;
3252} 3304}
3253 3305
3254static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3306static int qeth_l3_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..3360b0941aa1 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
293 struct device_attribute *attr, const char *buf, size_t count) 293 struct device_attribute *attr, const char *buf, size_t count)
294{ 294{
295 struct qeth_card *card = dev_get_drvdata(dev); 295 struct qeth_card *card = dev_get_drvdata(dev);
296 enum qeth_checksum_types csum_type;
296 char *tmp; 297 char *tmp;
298 int rc;
297 299
298 if (!card) 300 if (!card)
299 return -EINVAL; 301 return -EINVAL;
300 302
301 if ((card->state != CARD_STATE_DOWN) &&
302 (card->state != CARD_STATE_RECOVER))
303 return -EPERM;
304
305 tmp = strsep((char **) &buf, "\n"); 303 tmp = strsep((char **) &buf, "\n");
306 if (!strcmp(tmp, "sw_checksumming")) 304 if (!strcmp(tmp, "sw_checksumming"))
307 card->options.checksum_type = SW_CHECKSUMMING; 305 csum_type = SW_CHECKSUMMING;
308 else if (!strcmp(tmp, "hw_checksumming")) 306 else if (!strcmp(tmp, "hw_checksumming"))
309 card->options.checksum_type = HW_CHECKSUMMING; 307 csum_type = HW_CHECKSUMMING;
310 else if (!strcmp(tmp, "no_checksumming")) 308 else if (!strcmp(tmp, "no_checksumming"))
311 card->options.checksum_type = NO_CHECKSUMMING; 309 csum_type = NO_CHECKSUMMING;
312 else { 310 else
313 return -EINVAL; 311 return -EINVAL;
314 } 312
313 rc = qeth_l3_set_rx_csum(card, csum_type);
314 if (rc)
315 return rc;
315 return count; 316 return count;
316} 317}
317 318
318static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
319 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
320 321
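
Note: the checksumming store hook above drops its own card-state check and now only parses the keyword and delegates to qeth_l3_set_rx_csum, which can reconfigure a running card as well. A user-space model of the parse step; strsep cuts at the newline exactly as in the hook:

#define _DEFAULT_SOURCE /* for strsep */
#include <stdio.h>
#include <string.h>

static int parse_csum_keyword(char *buf)
{
        char *tmp = strsep(&buf, "\n"); /* cut at the first newline */

        if (!strcmp(tmp, "sw_checksumming"))
                return 0;               /* SW_CHECKSUMMING */
        if (!strcmp(tmp, "hw_checksumming"))
                return 1;               /* HW_CHECKSUMMING */
        if (!strcmp(tmp, "no_checksumming"))
                return 2;               /* NO_CHECKSUMMING */
        return -1;                      /* -EINVAL in the driver */
}

int main(void)
{
        char buf[] = "hw_checksumming\n";

        printf("%d\n", parse_csum_keyword(buf));
        return 0;
}
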
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 switch (card->options.large_send) {
331 case QETH_LARGE_SEND_NO:
332 return sprintf(buf, "%s\n", "no");
333 case QETH_LARGE_SEND_TSO:
334 return sprintf(buf, "%s\n", "TSO");
335 default:
336 return sprintf(buf, "%s\n", "N/A");
337 }
338}
339
340static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
341 struct device_attribute *attr, const char *buf, size_t count)
342{
343 struct qeth_card *card = dev_get_drvdata(dev);
344 enum qeth_large_send_types type;
345 int rc = 0;
346 char *tmp;
347
348 if (!card)
349 return -EINVAL;
350 tmp = strsep((char **) &buf, "\n");
351 if (!strcmp(tmp, "no"))
352 type = QETH_LARGE_SEND_NO;
353 else if (!strcmp(tmp, "TSO"))
354 type = QETH_LARGE_SEND_TSO;
355 else
356 return -EINVAL;
357
358 if (card->options.large_send == type)
359 return count;
360 rc = qeth_l3_set_large_send(card, type);
361 if (rc)
362 return rc;
363 return count;
364}
365
366static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
367 qeth_l3_dev_large_send_store);
368
321static struct attribute *qeth_l3_device_attrs[] = { 369static struct attribute *qeth_l3_device_attrs[] = {
322 &dev_attr_route4.attr, 370 &dev_attr_route4.attr,
323 &dev_attr_route6.attr, 371 &dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
325 &dev_attr_broadcast_mode.attr, 373 &dev_attr_broadcast_mode.attr,
326 &dev_attr_canonical_macaddr.attr, 374 &dev_attr_canonical_macaddr.attr,
327 &dev_attr_checksumming.attr, 375 &dev_attr_checksumming.attr,
376 &dev_attr_large_send.attr,
328 NULL, 377 NULL,
329}; 378};
330 379
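
Note: the new large_send attribute accepts exactly the strings its show side prints, "no" and "TSO", returns early when the value is unchanged, and otherwise routes through qeth_l3_set_large_send. A model of the round-trip with stand-in enum values:

#include <stdio.h>
#include <string.h>

enum ls { LS_NO, LS_TSO, LS_OTHER };

static const char *ls_show(enum ls v)
{
        switch (v) {
        case LS_NO:
                return "no";
        case LS_TSO:
                return "TSO";
        default:
                return "N/A";
        }
}

static int ls_store(const char *s, enum ls *v)
{
        enum ls want;

        if (!strcmp(s, "no"))
                want = LS_NO;
        else if (!strcmp(s, "TSO"))
                want = LS_TSO;
        else
                return -1;      /* -EINVAL in the driver */
        if (*v == want)
                return 0;       /* unchanged: skip the hardware work */
        *v = want;              /* the driver calls the setter here */
        return 0;
}

int main(void)
{
        enum ls v = LS_NO;

        ls_store("TSO", &v);
        printf("%s\n", ls_show(v));
        return 0;
}
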
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 102000d1af6f..67f2485d2372 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -158,12 +158,17 @@ static int smsg_pm_restore_thaw(struct device *dev)
158 smsg_path->flags = 0; 158 smsg_path->flags = 0;
159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", 159 rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
160 NULL, NULL, NULL); 160 NULL, NULL, NULL);
161 printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); 161#ifdef CONFIG_PM_DEBUG
162 if (rc)
163 printk(KERN_ERR
164 "iucv_path_connect returned with rc %i\n", rc);
165#endif
166 cpcmd("SET SMSG IUCV", NULL, 0, NULL);
162 } 167 }
163 return 0; 168 return 0;
164} 169}
165 170
166static struct dev_pm_ops smsg_pm_ops = { 171static const struct dev_pm_ops smsg_pm_ops = {
167 .freeze = smsg_pm_freeze, 172 .freeze = smsg_pm_freeze,
168 .thaw = smsg_pm_restore_thaw, 173 .thaw = smsg_pm_restore_thaw,
169 .restore = smsg_pm_restore_thaw, 174 .restore = smsg_pm_restore_thaw,
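
Note: the resume path above now logs connect failures only under CONFIG_PM_DEBUG, re-arms message delivery with the CP command SET SMSG IUCV after reconnecting, and the dev_pm_ops table becomes const since it is never written after initialization. A sketch of the thaw/restore sharing with invented types:

#include <stdio.h>

struct demo_pm_ops {
        int (*freeze)(void);
        int (*thaw)(void);
        int (*restore)(void);
};

static int reconnect(void)
{
        printf("iucv_path_connect + SET SMSG IUCV\n");
        return 0;
}

static int quiesce(void)
{
        printf("sever IUCV path\n");
        return 0;
}

/* const: the table is never modified after initialization */
static const struct demo_pm_ops smsg_pm = {
        .freeze  = quiesce,
        .thaw    = reconnect,   /* thaw and restore share one handler */
        .restore = reconnect,
};

int main(void)
{
        smsg_pm.freeze();
        smsg_pm.restore();
        return 0;
}
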
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1be6bf7e8ce6..9d0c941b7d33 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -31,6 +31,7 @@
31#include <linux/miscdevice.h> 31#include <linux/miscdevice.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include "zfcp_ext.h" 33#include "zfcp_ext.h"
34#include "zfcp_fc.h"
34 35
35#define ZFCP_BUS_ID_SIZE 20 36#define ZFCP_BUS_ID_SIZE 20
36 37
@@ -80,51 +81,53 @@ int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
80 81
81static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun) 82static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
82{ 83{
84 struct ccw_device *cdev;
83 struct zfcp_adapter *adapter; 85 struct zfcp_adapter *adapter;
84 struct zfcp_port *port; 86 struct zfcp_port *port;
85 struct zfcp_unit *unit; 87 struct zfcp_unit *unit;
86 88
87 mutex_lock(&zfcp_data.config_mutex); 89 cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
88 read_lock_irq(&zfcp_data.config_lock); 90 if (!cdev)
89 adapter = zfcp_get_adapter_by_busid(busid); 91 return;
90 if (adapter) 92
91 zfcp_adapter_get(adapter); 93 if (ccw_device_set_online(cdev))
92 read_unlock_irq(&zfcp_data.config_lock); 94 goto out_ccw_device;
93 95
96 adapter = zfcp_ccw_adapter_by_cdev(cdev);
94 if (!adapter) 97 if (!adapter)
95 goto out_adapter; 98 goto out_ccw_device;
96 port = zfcp_port_enqueue(adapter, wwpn, 0, 0); 99
97 if (IS_ERR(port)) 100 port = zfcp_get_port_by_wwpn(adapter, wwpn);
101 if (!port)
98 goto out_port; 102 goto out_port;
103
99 unit = zfcp_unit_enqueue(port, lun); 104 unit = zfcp_unit_enqueue(port, lun);
100 if (IS_ERR(unit)) 105 if (IS_ERR(unit))
101 goto out_unit; 106 goto out_unit;
102 mutex_unlock(&zfcp_data.config_mutex);
103 ccw_device_set_online(adapter->ccw_device);
104 107
108 zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
105 zfcp_erp_wait(adapter); 109 zfcp_erp_wait(adapter);
106 flush_work(&unit->scsi_work); 110 flush_work(&unit->scsi_work);
107 111
108 mutex_lock(&zfcp_data.config_mutex);
109 zfcp_unit_put(unit);
110out_unit: 112out_unit:
111 zfcp_port_put(port); 113 put_device(&port->sysfs_device);
112out_port: 114out_port:
113 zfcp_adapter_put(adapter); 115 zfcp_ccw_adapter_put(adapter);
114out_adapter: 116out_ccw_device:
115 mutex_unlock(&zfcp_data.config_mutex); 117 put_device(&cdev->dev);
116 return; 118 return;
117} 119}
118 120
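
Note: zfcp_init_device_configure is rewritten from config_lock/config_mutex bookkeeping to plain reference counting: look up the cdev by bus id, set it online, and take a reference at every step, with the unwind labels releasing them in reverse order. A stand-alone model of that ladder (everything below is invented for illustration):

#include <stdio.h>

static int get_ref(const char *what)
{
        printf("get %s\n", what);
        return 1;       /* pretend the lookup succeeded */
}

static void put_ref(const char *what)
{
        printf("put %s\n", what);
}

static void configure(int port_found)
{
        if (!get_ref("cdev"))
                return;
        if (!get_ref("adapter"))
                goto out_cdev;
        if (!port_found)
                goto out_adapter;
        get_ref("port");
        /* ... enqueue the unit, trigger reopen, wait for ERP ... */
        put_ref("port");
out_adapter:
        put_ref("adapter");
out_cdev:
        put_ref("cdev");
}

int main(void)
{
        configure(1);
        return 0;
}
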
119static void __init zfcp_init_device_setup(char *devstr) 121static void __init zfcp_init_device_setup(char *devstr)
120{ 122{
121 char *token; 123 char *token;
122 char *str; 124 char *str, *str_saved;
123 char busid[ZFCP_BUS_ID_SIZE]; 125 char busid[ZFCP_BUS_ID_SIZE];
124 u64 wwpn, lun; 126 u64 wwpn, lun;
125 127
126 /* duplicate devstr and keep the original for sysfs presentation*/ 128 /* duplicate devstr and keep the original for sysfs presentation*/
127 str = kmalloc(strlen(devstr) + 1, GFP_KERNEL); 129 str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL);
130 str = str_saved;
128 if (!str) 131 if (!str)
129 return; 132 return;
130 133
@@ -143,12 +146,12 @@ static void __init zfcp_init_device_setup(char *devstr)
143 if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun)) 146 if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
144 goto err_out; 147 goto err_out;
145 148
146 kfree(str); 149 kfree(str_saved);
147 zfcp_init_device_configure(busid, wwpn, lun); 150 zfcp_init_device_configure(busid, wwpn, lun);
148 return; 151 return;
149 152
150 err_out: 153err_out:
151 kfree(str); 154 kfree(str_saved);
152 pr_err("%s is not a valid SCSI device\n", devstr); 155 pr_err("%s is not a valid SCSI device\n", devstr);
153} 156}
154 157
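
Note: the str_saved change fixes a real bug: the parsing between these hunks advances str (presumably via strsep), so the later kfree saw either NULL (a silent leak) or a pointer into the middle of the buffer. A user-space demonstration of why the allocator's original pointer must be kept; the device string is made up:

#define _DEFAULT_SOURCE /* for strsep and strdup */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        /* keep the allocator's pointer; walk a separate copy of it */
        char *saved = strdup("0.0.4711,0x5005076300c213e9,0x0");
        char *walk = saved;
        char *token;

        if (!saved)
                return 1;
        while ((token = strsep(&walk, ",")))
                printf("token: %s\n", token);

        free(saved);    /* freeing walk here would be wrong: it is NULL */
        return 0;
}
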
@@ -157,7 +160,7 @@ static int __init zfcp_module_init(void)
157 int retval = -ENOMEM; 160 int retval = -ENOMEM;
158 161
159 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", 162 zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
160 sizeof(struct ct_iu_gpn_ft_req)); 163 sizeof(struct zfcp_fc_gpn_ft_req));
161 if (!zfcp_data.gpn_ft_cache) 164 if (!zfcp_data.gpn_ft_cache)
162 goto out; 165 goto out;
163 166
@@ -172,12 +175,14 @@ static int __init zfcp_module_init(void)
172 goto out_sr_cache; 175 goto out_sr_cache;
173 176
174 zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", 177 zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
175 sizeof(struct zfcp_gid_pn_data)); 178 sizeof(struct zfcp_fc_gid_pn));
176 if (!zfcp_data.gid_pn_cache) 179 if (!zfcp_data.gid_pn_cache)
177 goto out_gid_cache; 180 goto out_gid_cache;
178 181
179 mutex_init(&zfcp_data.config_mutex); 182 zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
180 rwlock_init(&zfcp_data.config_lock); 183 sizeof(struct zfcp_fc_els_adisc));
184 if (!zfcp_data.adisc_cache)
185 goto out_adisc_cache;
181 186
182 zfcp_data.scsi_transport_template = 187 zfcp_data.scsi_transport_template =
183 fc_attach_transport(&zfcp_transport_functions); 188 fc_attach_transport(&zfcp_transport_functions);
@@ -190,7 +195,7 @@ static int __init zfcp_module_init(void)
190 goto out_misc; 195 goto out_misc;
191 } 196 }
192 197
193 retval = zfcp_ccw_register(); 198 retval = ccw_driver_register(&zfcp_ccw_driver);
194 if (retval) { 199 if (retval) {
195 pr_err("The zfcp device driver could not register with " 200 pr_err("The zfcp device driver could not register with "
196 "the common I/O layer\n"); 201 "the common I/O layer\n");
@@ -206,6 +211,8 @@ out_ccw_register:
206out_misc: 211out_misc:
207 fc_release_transport(zfcp_data.scsi_transport_template); 212 fc_release_transport(zfcp_data.scsi_transport_template);
208out_transport: 213out_transport:
214 kmem_cache_destroy(zfcp_data.adisc_cache);
215out_adisc_cache:
209 kmem_cache_destroy(zfcp_data.gid_pn_cache); 216 kmem_cache_destroy(zfcp_data.gid_pn_cache);
210out_gid_cache: 217out_gid_cache:
211 kmem_cache_destroy(zfcp_data.sr_buffer_cache); 218 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
@@ -219,6 +226,20 @@ out:
219 226
220module_init(zfcp_module_init); 227module_init(zfcp_module_init);
221 228
229static void __exit zfcp_module_exit(void)
230{
231 ccw_driver_unregister(&zfcp_ccw_driver);
232 misc_deregister(&zfcp_cfdc_misc);
233 fc_release_transport(zfcp_data.scsi_transport_template);
234 kmem_cache_destroy(zfcp_data.adisc_cache);
235 kmem_cache_destroy(zfcp_data.gid_pn_cache);
236 kmem_cache_destroy(zfcp_data.sr_buffer_cache);
237 kmem_cache_destroy(zfcp_data.qtcb_cache);
238 kmem_cache_destroy(zfcp_data.gpn_ft_cache);
239}
240
241module_exit(zfcp_module_exit);
242
222/** 243/**
223 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN 244 * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
224 * @port: pointer to port to search for unit 245 * @port: pointer to port to search for unit
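
Note: the new zfcp_module_exit releases resources in exactly the reverse order of zfcp_module_init, the usual way to guarantee that each teardown step only touches things that still exist. A trivial model of the symmetry:

#include <stdio.h>

int main(void)
{
        static const char *steps[] = {
                "caches", "scsi transport", "misc device", "ccw driver",
        };
        int i, n = sizeof(steps) / sizeof(steps[0]);

        for (i = 0; i < n; i++)
                printf("init: %s\n", steps[i]);
        for (i = n - 1; i >= 0; i--)
                printf("exit: %s\n", steps[i]);
        return 0;
}
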
@@ -228,12 +249,18 @@ module_init(zfcp_module_init);
228 */ 249 */
229struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun) 250struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
230{ 251{
252 unsigned long flags;
231 struct zfcp_unit *unit; 253 struct zfcp_unit *unit;
232 254
233 list_for_each_entry(unit, &port->unit_list_head, list) 255 read_lock_irqsave(&port->unit_list_lock, flags);
234 if ((unit->fcp_lun == fcp_lun) && 256 list_for_each_entry(unit, &port->unit_list, list)
235 !(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_REMOVE)) 257 if (unit->fcp_lun == fcp_lun) {
236 return unit; 258 if (!get_device(&unit->sysfs_device))
259 unit = NULL;
260 read_unlock_irqrestore(&port->unit_list_lock, flags);
261 return unit;
262 }
263 read_unlock_irqrestore(&port->unit_list_lock, flags);
237 return NULL; 264 return NULL;
238} 265}
239 266
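
Note: both lookup helpers now walk their list under a read lock and hand out an object only if get_device succeeds; a failed get means the object is already mid-teardown, which replaces the old ZFCP_STATUS_COMMON_REMOVE flag checks. A model of the rule, where dying stands in for a failed get_device:

#include <stdio.h>

struct obj {
        int key;
        int refs;
        int dying;      /* stands in for "get_device() would fail" */
};

static struct obj *lookup(struct obj *tbl, int n, int key)
{
        int i;

        /* a read_lock_irqsave() brackets this walk in the driver */
        for (i = 0; i < n; i++)
                if (tbl[i].key == key) {
                        if (tbl[i].dying)
                                return NULL;    /* reference unavailable */
                        tbl[i].refs++;          /* caller now owns a ref */
                        return &tbl[i];
                }
        return NULL;
}

int main(void)
{
        struct obj tbl[] = { { 1, 0, 0 }, { 2, 0, 1 } };

        printf("key 1: %s\n", lookup(tbl, 2, 1) ? "found" : "miss");
        printf("key 2: %s\n", lookup(tbl, 2, 2) ? "found" : "miss");
        return 0;
}
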
@@ -247,18 +274,35 @@ struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
247struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter, 274struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
248 u64 wwpn) 275 u64 wwpn)
249{ 276{
277 unsigned long flags;
250 struct zfcp_port *port; 278 struct zfcp_port *port;
251 279
252 list_for_each_entry(port, &adapter->port_list_head, list) 280 read_lock_irqsave(&adapter->port_list_lock, flags);
253 if ((port->wwpn == wwpn) && 281 list_for_each_entry(port, &adapter->port_list, list)
254 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE)) 282 if (port->wwpn == wwpn) {
283 if (!get_device(&port->sysfs_device))
284 port = NULL;
285 read_unlock_irqrestore(&adapter->port_list_lock, flags);
255 return port; 286 return port;
287 }
288 read_unlock_irqrestore(&adapter->port_list_lock, flags);
256 return NULL; 289 return NULL;
257} 290}
258 291
259static void zfcp_sysfs_unit_release(struct device *dev) 292/**
293 * zfcp_unit_release - dequeue unit
294 * @dev: pointer to device
295 *
 296 * waits until all work on the unit is done and then removes it from the
 297 * unit list of the associated port.
298 */
299static void zfcp_unit_release(struct device *dev)
260{ 300{
261 kfree(container_of(dev, struct zfcp_unit, sysfs_device)); 301 struct zfcp_unit *unit = container_of(dev, struct zfcp_unit,
302 sysfs_device);
303
304 put_device(&unit->port->sysfs_device);
305 kfree(unit);
262} 306}
263 307
264/** 308/**
@@ -266,43 +310,40 @@ static void zfcp_sysfs_unit_release(struct device *dev)
266 * @port: pointer to port where unit is added 310 * @port: pointer to port where unit is added
267 * @fcp_lun: FCP LUN of unit to be enqueued 311 * @fcp_lun: FCP LUN of unit to be enqueued
268 * Returns: pointer to enqueued unit on success, ERR_PTR on error 312 * Returns: pointer to enqueued unit on success, ERR_PTR on error
269 * Locks: config_mutex must be held to serialize changes to the unit list
270 * 313 *
271 * Sets up some unit internal structures and creates sysfs entry. 314 * Sets up some unit internal structures and creates sysfs entry.
272 */ 315 */
273struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun) 316struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
274{ 317{
275 struct zfcp_unit *unit; 318 struct zfcp_unit *unit;
319 int retval = -ENOMEM;
320
321 get_device(&port->sysfs_device);
276 322
277 read_lock_irq(&zfcp_data.config_lock); 323 unit = zfcp_get_unit_by_lun(port, fcp_lun);
278 if (zfcp_get_unit_by_lun(port, fcp_lun)) { 324 if (unit) {
279 read_unlock_irq(&zfcp_data.config_lock); 325 put_device(&unit->sysfs_device);
280 return ERR_PTR(-EINVAL); 326 retval = -EEXIST;
327 goto err_out;
281 } 328 }
282 read_unlock_irq(&zfcp_data.config_lock);
283 329
284 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL); 330 unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
285 if (!unit) 331 if (!unit)
286 return ERR_PTR(-ENOMEM); 332 goto err_out;
287
288 atomic_set(&unit->refcount, 0);
289 init_waitqueue_head(&unit->remove_wq);
290 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
291 333
292 unit->port = port; 334 unit->port = port;
293 unit->fcp_lun = fcp_lun; 335 unit->fcp_lun = fcp_lun;
336 unit->sysfs_device.parent = &port->sysfs_device;
337 unit->sysfs_device.release = zfcp_unit_release;
294 338
295 if (dev_set_name(&unit->sysfs_device, "0x%016llx", 339 if (dev_set_name(&unit->sysfs_device, "0x%016llx",
296 (unsigned long long) fcp_lun)) { 340 (unsigned long long) fcp_lun)) {
297 kfree(unit); 341 kfree(unit);
298 return ERR_PTR(-ENOMEM); 342 goto err_out;
299 } 343 }
300 unit->sysfs_device.parent = &port->sysfs_device; 344 retval = -EINVAL;
301 unit->sysfs_device.release = zfcp_sysfs_unit_release;
302 dev_set_drvdata(&unit->sysfs_device, unit);
303 345
304 /* mark unit unusable as long as sysfs registration is not complete */ 346 INIT_WORK(&unit->scsi_work, zfcp_scsi_scan);
305 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
306 347
307 spin_lock_init(&unit->latencies.lock); 348 spin_lock_init(&unit->latencies.lock);
308 unit->latencies.write.channel.min = 0xFFFFFFFF; 349 unit->latencies.write.channel.min = 0xFFFFFFFF;
@@ -314,50 +355,30 @@ struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
314 355
315 if (device_register(&unit->sysfs_device)) { 356 if (device_register(&unit->sysfs_device)) {
316 put_device(&unit->sysfs_device); 357 put_device(&unit->sysfs_device);
317 return ERR_PTR(-EINVAL); 358 goto err_out;
318 } 359 }
319 360
320 if (sysfs_create_group(&unit->sysfs_device.kobj, 361 if (sysfs_create_group(&unit->sysfs_device.kobj,
321 &zfcp_sysfs_unit_attrs)) { 362 &zfcp_sysfs_unit_attrs))
322 device_unregister(&unit->sysfs_device); 363 goto err_out_put;
323 return ERR_PTR(-EINVAL);
324 }
325 364
326 zfcp_unit_get(unit); 365 write_lock_irq(&port->unit_list_lock);
366 list_add_tail(&unit->list, &port->unit_list);
367 write_unlock_irq(&port->unit_list_lock);
327 368
328 write_lock_irq(&zfcp_data.config_lock);
329 list_add_tail(&unit->list, &port->unit_list_head);
330 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &unit->status);
331 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status); 369 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
332 370
333 write_unlock_irq(&zfcp_data.config_lock);
334
335 zfcp_port_get(port);
336
337 return unit; 371 return unit;
338}
339 372
340/** 373err_out_put:
341 * zfcp_unit_dequeue - dequeue unit
342 * @unit: pointer to zfcp_unit
343 *
 344 * waits until all work on the unit is done and then removes it from the
 345 * unit list of the associated port.
346 */
347void zfcp_unit_dequeue(struct zfcp_unit *unit)
348{
349 wait_event(unit->remove_wq, atomic_read(&unit->refcount) == 0);
350 write_lock_irq(&zfcp_data.config_lock);
351 list_del(&unit->list);
352 write_unlock_irq(&zfcp_data.config_lock);
353 zfcp_port_put(unit->port);
354 sysfs_remove_group(&unit->sysfs_device.kobj, &zfcp_sysfs_unit_attrs);
355 device_unregister(&unit->sysfs_device); 374 device_unregister(&unit->sysfs_device);
375err_out:
376 put_device(&port->sysfs_device);
377 return ERR_PTR(retval);
356} 378}
357 379
358static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 380static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
359{ 381{
360 /* must only be called with zfcp_data.config_mutex taken */
361 adapter->pool.erp_req = 382 adapter->pool.erp_req =
362 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req)); 383 mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
363 if (!adapter->pool.erp_req) 384 if (!adapter->pool.erp_req)
@@ -395,9 +416,9 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
395 if (!adapter->pool.status_read_data) 416 if (!adapter->pool.status_read_data)
396 return -ENOMEM; 417 return -ENOMEM;
397 418
398 adapter->pool.gid_pn_data = 419 adapter->pool.gid_pn =
399 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); 420 mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
400 if (!adapter->pool.gid_pn_data) 421 if (!adapter->pool.gid_pn)
401 return -ENOMEM; 422 return -ENOMEM;
402 423
403 return 0; 424 return 0;
@@ -405,7 +426,6 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
405 426
406static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) 427static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
407{ 428{
408 /* zfcp_data.config_mutex must be held */
409 if (adapter->pool.erp_req) 429 if (adapter->pool.erp_req)
410 mempool_destroy(adapter->pool.erp_req); 430 mempool_destroy(adapter->pool.erp_req);
411 if (adapter->pool.scsi_req) 431 if (adapter->pool.scsi_req)
@@ -418,8 +438,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
418 mempool_destroy(adapter->pool.status_read_req); 438 mempool_destroy(adapter->pool.status_read_req);
419 if (adapter->pool.status_read_data) 439 if (adapter->pool.status_read_data)
420 mempool_destroy(adapter->pool.status_read_data); 440 mempool_destroy(adapter->pool.status_read_data);
421 if (adapter->pool.gid_pn_data) 441 if (adapter->pool.gid_pn)
422 mempool_destroy(adapter->pool.gid_pn_data); 442 mempool_destroy(adapter->pool.gid_pn);
423} 443}
424 444
425/** 445/**
@@ -487,53 +507,56 @@ static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
487 * zfcp_adapter_enqueue - enqueue a new adapter to the list 507 * zfcp_adapter_enqueue - enqueue a new adapter to the list
 488 * @ccw_device: pointer to the struct ccw_device 508
489 * 509 *
490 * Returns: 0 if a new adapter was successfully enqueued 510 * Returns: struct zfcp_adapter*
491 * -ENOMEM if alloc failed
492 * Enqueues an adapter at the end of the adapter list in the driver data. 511 * Enqueues an adapter at the end of the adapter list in the driver data.
493 * All adapter internal structures are set up. 512 * All adapter internal structures are set up.
494 * Proc-fs entries are also created. 513 * Proc-fs entries are also created.
495 * locks: config_mutex must be held to serialize changes to the adapter list
496 */ 514 */
497int zfcp_adapter_enqueue(struct ccw_device *ccw_device) 515struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
498{ 516{
499 struct zfcp_adapter *adapter; 517 struct zfcp_adapter *adapter;
500 518
501 /* 519 if (!get_device(&ccw_device->dev))
502 * Note: It is safe to release the list_lock, as any list changes 520 return ERR_PTR(-ENODEV);
503 * are protected by the config_mutex, which must be held to get here
504 */
505 521
506 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL); 522 adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
507 if (!adapter) 523 if (!adapter) {
508 return -ENOMEM; 524 put_device(&ccw_device->dev);
525 return ERR_PTR(-ENOMEM);
526 }
527
528 kref_init(&adapter->ref);
509 529
510 ccw_device->handler = NULL; 530 ccw_device->handler = NULL;
511 adapter->ccw_device = ccw_device; 531 adapter->ccw_device = ccw_device;
512 atomic_set(&adapter->refcount, 0); 532
533 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
534 INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
513 535
514 if (zfcp_qdio_setup(adapter)) 536 if (zfcp_qdio_setup(adapter))
515 goto qdio_failed; 537 goto failed;
516 538
517 if (zfcp_allocate_low_mem_buffers(adapter)) 539 if (zfcp_allocate_low_mem_buffers(adapter))
518 goto low_mem_buffers_failed; 540 goto failed;
519 541
520 if (zfcp_reqlist_alloc(adapter)) 542 if (zfcp_reqlist_alloc(adapter))
521 goto low_mem_buffers_failed; 543 goto failed;
522 544
523 if (zfcp_dbf_adapter_register(adapter)) 545 if (zfcp_dbf_adapter_register(adapter))
524 goto debug_register_failed; 546 goto failed;
525 547
526 if (zfcp_setup_adapter_work_queue(adapter)) 548 if (zfcp_setup_adapter_work_queue(adapter))
527 goto work_queue_failed; 549 goto failed;
528 550
529 if (zfcp_fc_gs_setup(adapter)) 551 if (zfcp_fc_gs_setup(adapter))
530 goto generic_services_failed; 552 goto failed;
553
554 rwlock_init(&adapter->port_list_lock);
555 INIT_LIST_HEAD(&adapter->port_list);
531 556
532 init_waitqueue_head(&adapter->remove_wq);
533 init_waitqueue_head(&adapter->erp_ready_wq); 557 init_waitqueue_head(&adapter->erp_ready_wq);
534 init_waitqueue_head(&adapter->erp_done_wqh); 558 init_waitqueue_head(&adapter->erp_done_wqh);
535 559
536 INIT_LIST_HEAD(&adapter->port_list_head);
537 INIT_LIST_HEAD(&adapter->erp_ready_head); 560 INIT_LIST_HEAD(&adapter->erp_ready_head);
538 INIT_LIST_HEAD(&adapter->erp_running_head); 561 INIT_LIST_HEAD(&adapter->erp_running_head);
539 562
@@ -543,85 +566,85 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
543 rwlock_init(&adapter->abort_lock); 566 rwlock_init(&adapter->abort_lock);
544 567
545 if (zfcp_erp_thread_setup(adapter)) 568 if (zfcp_erp_thread_setup(adapter))
546 goto erp_thread_failed; 569 goto failed;
547
548 INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
549 INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
550 570
551 adapter->service_level.seq_print = zfcp_print_sl; 571 adapter->service_level.seq_print = zfcp_print_sl;
552 572
553 /* mark adapter unusable as long as sysfs registration is not complete */
554 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
555
556 dev_set_drvdata(&ccw_device->dev, adapter); 573 dev_set_drvdata(&ccw_device->dev, adapter);
557 574
558 if (sysfs_create_group(&ccw_device->dev.kobj, 575 if (sysfs_create_group(&ccw_device->dev.kobj,
559 &zfcp_sysfs_adapter_attrs)) 576 &zfcp_sysfs_adapter_attrs))
560 goto sysfs_failed; 577 goto failed;
561
562 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
563 578
564 if (!zfcp_adapter_scsi_register(adapter)) 579 if (!zfcp_adapter_scsi_register(adapter))
565 return 0; 580 return adapter;
566 581
567sysfs_failed: 582failed:
568 zfcp_erp_thread_kill(adapter); 583 zfcp_adapter_unregister(adapter);
569erp_thread_failed: 584 return ERR_PTR(-ENOMEM);
570 zfcp_fc_gs_destroy(adapter); 585}
571generic_services_failed: 586
587void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
588{
589 struct ccw_device *cdev = adapter->ccw_device;
590
591 cancel_work_sync(&adapter->scan_work);
592 cancel_work_sync(&adapter->stat_work);
572 zfcp_destroy_adapter_work_queue(adapter); 593 zfcp_destroy_adapter_work_queue(adapter);
573work_queue_failed: 594
595 zfcp_fc_wka_ports_force_offline(adapter->gs);
596 zfcp_adapter_scsi_unregister(adapter);
597 sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
598
599 zfcp_erp_thread_kill(adapter);
574 zfcp_dbf_adapter_unregister(adapter->dbf); 600 zfcp_dbf_adapter_unregister(adapter->dbf);
575debug_register_failed:
576 dev_set_drvdata(&ccw_device->dev, NULL);
577 kfree(adapter->req_list);
578low_mem_buffers_failed:
579 zfcp_free_low_mem_buffers(adapter);
580qdio_failed:
581 zfcp_qdio_destroy(adapter->qdio); 601 zfcp_qdio_destroy(adapter->qdio);
582 kfree(adapter); 602
583 return -ENOMEM; 603 zfcp_ccw_adapter_put(adapter); /* final put to release */
584} 604}
585 605
586/** 606/**
587 * zfcp_adapter_dequeue - remove the adapter from the resource list 607 * zfcp_adapter_release - remove the adapter from the resource list
588 * @adapter: pointer to struct zfcp_adapter which should be removed 608 * @ref: pointer to struct kref
589 * locks: adapter list write lock is assumed to be held by caller 609 * locks: adapter list write lock is assumed to be held by caller
590 */ 610 */
591void zfcp_adapter_dequeue(struct zfcp_adapter *adapter) 611void zfcp_adapter_release(struct kref *ref)
592{ 612{
593 int retval = 0; 613 struct zfcp_adapter *adapter = container_of(ref, struct zfcp_adapter,
594 unsigned long flags; 614 ref);
615 struct ccw_device *cdev = adapter->ccw_device;
595 616
596 cancel_work_sync(&adapter->scan_work);
597 cancel_work_sync(&adapter->stat_work);
598 zfcp_fc_wka_ports_force_offline(adapter->gs);
599 zfcp_adapter_scsi_unregister(adapter);
600 sysfs_remove_group(&adapter->ccw_device->dev.kobj,
601 &zfcp_sysfs_adapter_attrs);
602 dev_set_drvdata(&adapter->ccw_device->dev, NULL); 617 dev_set_drvdata(&adapter->ccw_device->dev, NULL);
603 /* sanity check: no pending FSF requests */
604 spin_lock_irqsave(&adapter->req_list_lock, flags);
605 retval = zfcp_reqlist_isempty(adapter);
606 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
607 if (!retval)
608 return;
609
610 zfcp_fc_gs_destroy(adapter); 618 zfcp_fc_gs_destroy(adapter);
611 zfcp_erp_thread_kill(adapter);
612 zfcp_destroy_adapter_work_queue(adapter);
613 zfcp_dbf_adapter_unregister(adapter->dbf);
614 zfcp_free_low_mem_buffers(adapter); 619 zfcp_free_low_mem_buffers(adapter);
615 zfcp_qdio_destroy(adapter->qdio);
616 kfree(adapter->req_list); 620 kfree(adapter->req_list);
617 kfree(adapter->fc_stats); 621 kfree(adapter->fc_stats);
618 kfree(adapter->stats_reset_data); 622 kfree(adapter->stats_reset_data);
619 kfree(adapter); 623 kfree(adapter);
624 put_device(&cdev->dev);
620} 625}
621 626
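
Note: adapter teardown is now split in two: zfcp_adapter_unregister detaches the adapter from the outside world (sysfs, SCSI host, ERP thread) and drops what should be the final reference, while zfcp_adapter_release, the kref callback just above, frees memory once the last user is really gone. A sketch of the split with invented names:

#include <stdio.h>
#include <stdlib.h>

struct adapter {
        int refs;
};

static void release(struct adapter *a) /* kref callback: free only */
{
        printf("free memory\n");
        free(a);
}

static void put(struct adapter *a)
{
        if (--a->refs == 0)
                release(a);
}

static void unregister(struct adapter *a) /* detach, then final put */
{
        printf("remove sysfs group, scsi host, erp thread\n");
        put(a);
}

int main(void)
{
        struct adapter *a = malloc(sizeof(*a));

        if (!a)
                return 1;
        a->refs = 1;    /* kref_init */
        unregister(a);  /* the last put frees */
        return 0;
}
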
622static void zfcp_sysfs_port_release(struct device *dev) 627/**
628 * zfcp_device_unregister - remove port, unit from system
629 * @dev: reference to device which is to be removed
630 * @grp: related reference to attribute group
631 *
632 * Helper function to unregister port, unit from system
633 */
634void zfcp_device_unregister(struct device *dev,
635 const struct attribute_group *grp)
623{ 636{
624 kfree(container_of(dev, struct zfcp_port, sysfs_device)); 637 sysfs_remove_group(&dev->kobj, grp);
638 device_unregister(dev);
639}
640
641static void zfcp_port_release(struct device *dev)
642{
643 struct zfcp_port *port = container_of(dev, struct zfcp_port,
644 sysfs_device);
645
646 zfcp_ccw_adapter_put(port->adapter);
647 kfree(port);
625} 648}
626 649
627/** 650/**
@@ -631,7 +654,6 @@ static void zfcp_sysfs_port_release(struct device *dev)
631 * @status: initial status for the port 654 * @status: initial status for the port
632 * @d_id: destination id of the remote port to be enqueued 655 * @d_id: destination id of the remote port to be enqueued
633 * Returns: pointer to enqueued port on success, ERR_PTR on error 656 * Returns: pointer to enqueued port on success, ERR_PTR on error
634 * Locks: config_mutex must be held to serialize changes to the port list
635 * 657 *
636 * All port internal structures are set up and the sysfs entry is generated. 658 * All port internal structures are set up and the sysfs entry is generated.
637 * d_id is used to enqueue ports with a well known address like the Directory 659 * d_id is used to enqueue ports with a well known address like the Directory
@@ -641,20 +663,24 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
641 u32 status, u32 d_id) 663 u32 status, u32 d_id)
642{ 664{
643 struct zfcp_port *port; 665 struct zfcp_port *port;
666 int retval = -ENOMEM;
667
668 kref_get(&adapter->ref);
644 669
645 read_lock_irq(&zfcp_data.config_lock); 670 port = zfcp_get_port_by_wwpn(adapter, wwpn);
646 if (zfcp_get_port_by_wwpn(adapter, wwpn)) { 671 if (port) {
647 read_unlock_irq(&zfcp_data.config_lock); 672 put_device(&port->sysfs_device);
648 return ERR_PTR(-EINVAL); 673 retval = -EEXIST;
674 goto err_out;
649 } 675 }
650 read_unlock_irq(&zfcp_data.config_lock);
651 676
652 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL); 677 port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
653 if (!port) 678 if (!port)
654 return ERR_PTR(-ENOMEM); 679 goto err_out;
680
681 rwlock_init(&port->unit_list_lock);
682 INIT_LIST_HEAD(&port->unit_list);
655 683
656 init_waitqueue_head(&port->remove_wq);
657 INIT_LIST_HEAD(&port->unit_list_head);
658 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); 684 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
659 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); 685 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
660 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work); 686 INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
@@ -663,58 +689,38 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
663 port->d_id = d_id; 689 port->d_id = d_id;
664 port->wwpn = wwpn; 690 port->wwpn = wwpn;
665 port->rport_task = RPORT_NONE; 691 port->rport_task = RPORT_NONE;
666 692 port->sysfs_device.parent = &adapter->ccw_device->dev;
667 /* mark port unusable as long as sysfs registration is not complete */ 693 port->sysfs_device.release = zfcp_port_release;
668 atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
669 atomic_set(&port->refcount, 0);
670 694
671 if (dev_set_name(&port->sysfs_device, "0x%016llx", 695 if (dev_set_name(&port->sysfs_device, "0x%016llx",
672 (unsigned long long)wwpn)) { 696 (unsigned long long)wwpn)) {
673 kfree(port); 697 kfree(port);
674 return ERR_PTR(-ENOMEM); 698 goto err_out;
675 } 699 }
676 port->sysfs_device.parent = &adapter->ccw_device->dev; 700 retval = -EINVAL;
677 port->sysfs_device.release = zfcp_sysfs_port_release;
678 dev_set_drvdata(&port->sysfs_device, port);
679 701
680 if (device_register(&port->sysfs_device)) { 702 if (device_register(&port->sysfs_device)) {
681 put_device(&port->sysfs_device); 703 put_device(&port->sysfs_device);
682 return ERR_PTR(-EINVAL); 704 goto err_out;
683 } 705 }
684 706
685 if (sysfs_create_group(&port->sysfs_device.kobj, 707 if (sysfs_create_group(&port->sysfs_device.kobj,
686 &zfcp_sysfs_port_attrs)) { 708 &zfcp_sysfs_port_attrs))
687 device_unregister(&port->sysfs_device); 709 goto err_out_put;
688 return ERR_PTR(-EINVAL);
689 }
690 710
691 zfcp_port_get(port); 711 write_lock_irq(&adapter->port_list_lock);
712 list_add_tail(&port->list, &adapter->port_list);
713 write_unlock_irq(&adapter->port_list_lock);
692 714
693 write_lock_irq(&zfcp_data.config_lock); 715 atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
694 list_add_tail(&port->list, &adapter->port_list_head);
695 atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
696 atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &port->status);
697 716
698 write_unlock_irq(&zfcp_data.config_lock);
699
700 zfcp_adapter_get(adapter);
701 return port; 717 return port;
702}
703 718
704/** 719err_out_put:
705 * zfcp_port_dequeue - dequeues a port from the port list of the adapter
706 * @port: pointer to struct zfcp_port which should be removed
707 */
708void zfcp_port_dequeue(struct zfcp_port *port)
709{
710 write_lock_irq(&zfcp_data.config_lock);
711 list_del(&port->list);
712 write_unlock_irq(&zfcp_data.config_lock);
713 wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
714 cancel_work_sync(&port->rport_work); /* usually not necessary */
715 zfcp_adapter_put(port->adapter);
716 sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
717 device_unregister(&port->sysfs_device); 720 device_unregister(&port->sysfs_device);
721err_out:
722 zfcp_ccw_adapter_put(adapter);
723 return ERR_PTR(retval);
718} 724}
719 725
720/** 726/**
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 0c90f8e71605..c22cb72a5ae8 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -13,28 +13,34 @@
13 13
14#define ZFCP_MODEL_PRIV 0x4 14#define ZFCP_MODEL_PRIV 0x4
15 15
16static int zfcp_ccw_suspend(struct ccw_device *cdev) 16static DEFINE_SPINLOCK(zfcp_ccw_adapter_ref_lock);
17 17
18struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *cdev)
18{ 19{
19 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 20 struct zfcp_adapter *adapter;
20 21 unsigned long flags;
21 if (!adapter)
22 return 0;
23
24 mutex_lock(&zfcp_data.config_mutex);
25 22
26 zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); 23 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
27 zfcp_erp_wait(adapter); 24 adapter = dev_get_drvdata(&cdev->dev);
25 if (adapter)
26 kref_get(&adapter->ref);
27 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
28 return adapter;
29}
28 30
29 mutex_unlock(&zfcp_data.config_mutex); 31void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
32{
33 unsigned long flags;
30 34
31 return 0; 35 spin_lock_irqsave(&zfcp_ccw_adapter_ref_lock, flags);
36 kref_put(&adapter->ref, zfcp_adapter_release);
37 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
32} 38}
33 39
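
Note: the new zfcp_ccw_adapter_by_cdev/zfcp_ccw_adapter_put pair serializes kref_get and kref_put behind one spinlock, so a lookup can never obtain a reference to an adapter whose final put is already in flight. A sketch of the two operations with the lock sites marked as comments (stand-in types, no real locking):

#include <stdio.h>

struct adapter {
        int refs;
};

static void adapter_release(struct adapter *a)
{
        printf("released\n");   /* zfcp_adapter_release() in the driver */
}

static struct adapter *adapter_get(struct adapter *a)
{
        /* spin_lock_irqsave(&ref_lock, flags) in the driver */
        if (a)
                a->refs++;
        /* spin_unlock_irqrestore(&ref_lock, flags) */
        return a;
}

static void adapter_put(struct adapter *a)
{
        /* spin_lock_irqsave(&ref_lock, flags) */
        if (--a->refs == 0)
                adapter_release(a);
        /* spin_unlock_irqrestore(&ref_lock, flags) */
}

int main(void)
{
        struct adapter a = { 1 };       /* initial ref from kref_init */

        adapter_get(&a);
        adapter_put(&a);
        adapter_put(&a);        /* the final put triggers the release */
        return 0;
}
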
34static int zfcp_ccw_activate(struct ccw_device *cdev) 40static int zfcp_ccw_activate(struct ccw_device *cdev)
35 41
36{ 42{
37 struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); 43 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
38 44
39 if (!adapter) 45 if (!adapter)
40 return 0; 46 return 0;
@@ -46,6 +52,8 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
46 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
47 flush_work(&adapter->scan_work); 53 flush_work(&adapter->scan_work);
48 54
55 zfcp_ccw_adapter_put(adapter);
56
49 return 0; 57 return 0;
50} 58}
51 59
@@ -67,28 +75,28 @@ int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
67 75
68/** 76/**
69 * zfcp_ccw_probe - probe function of zfcp driver 77 * zfcp_ccw_probe - probe function of zfcp driver
70 * @ccw_device: pointer to belonging ccw device 78 * @cdev: pointer to belonging ccw device
71 * 79 *
72 * This function gets called by the common i/o layer for each FCP 80 * This function gets called by the common i/o layer for each FCP
73 * device found on the current system. This is only a stub to make cio 81 * device found on the current system. This is only a stub to make cio
74 * work: To only allocate adapter resources for devices actually used, 82 * work: To only allocate adapter resources for devices actually used,
75 * the allocation is deferred to the first call to ccw_set_online. 83 * the allocation is deferred to the first call to ccw_set_online.
76 */ 84 */
77static int zfcp_ccw_probe(struct ccw_device *ccw_device) 85static int zfcp_ccw_probe(struct ccw_device *cdev)
78{ 86{
79 return 0; 87 return 0;
80} 88}
81 89
82/** 90/**
83 * zfcp_ccw_remove - remove function of zfcp driver 91 * zfcp_ccw_remove - remove function of zfcp driver
84 * @ccw_device: pointer to belonging ccw device 92 * @cdev: pointer to belonging ccw device
85 * 93 *
86 * This function gets called by the common i/o layer and removes an adapter 94 * This function gets called by the common i/o layer and removes an adapter
87 * from the system. Task of this function is to get rid of all units and 95 * from the system. Task of this function is to get rid of all units and
88 * ports that belong to this adapter. And in addition all resources of this 96 * ports that belong to this adapter. And in addition all resources of this
89 * adapter will be freed too. 97 * adapter will be freed too.
90 */ 98 */
91static void zfcp_ccw_remove(struct ccw_device *ccw_device) 99static void zfcp_ccw_remove(struct ccw_device *cdev)
92{ 100{
93 struct zfcp_adapter *adapter; 101 struct zfcp_adapter *adapter;
94 struct zfcp_port *port, *p; 102 struct zfcp_port *port, *p;
@@ -96,44 +104,37 @@ static void zfcp_ccw_remove(struct ccw_device *ccw_device)
96 LIST_HEAD(unit_remove_lh); 104 LIST_HEAD(unit_remove_lh);
97 LIST_HEAD(port_remove_lh); 105 LIST_HEAD(port_remove_lh);
98 106
99 ccw_device_set_offline(ccw_device); 107 ccw_device_set_offline(cdev);
100 108
101 mutex_lock(&zfcp_data.config_mutex); 109 adapter = zfcp_ccw_adapter_by_cdev(cdev);
102 adapter = dev_get_drvdata(&ccw_device->dev);
103 if (!adapter) 110 if (!adapter)
104 goto out; 111 return;
105 112
106 write_lock_irq(&zfcp_data.config_lock); 113 write_lock_irq(&adapter->port_list_lock);
107 list_for_each_entry_safe(port, p, &adapter->port_list_head, list) { 114 list_for_each_entry_safe(port, p, &adapter->port_list, list) {
108 list_for_each_entry_safe(unit, u, &port->unit_list_head, list) { 115 write_lock(&port->unit_list_lock);
116 list_for_each_entry_safe(unit, u, &port->unit_list, list)
109 list_move(&unit->list, &unit_remove_lh); 117 list_move(&unit->list, &unit_remove_lh);
110 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, 118 write_unlock(&port->unit_list_lock);
111 &unit->status);
112 }
113 list_move(&port->list, &port_remove_lh); 119 list_move(&port->list, &port_remove_lh);
114 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
115 } 120 }
116 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); 121 write_unlock_irq(&adapter->port_list_lock);
117 write_unlock_irq(&zfcp_data.config_lock); 122 zfcp_ccw_adapter_put(adapter); /* put from zfcp_ccw_adapter_by_cdev */
118 123
119 list_for_each_entry_safe(port, p, &port_remove_lh, list) { 124 list_for_each_entry_safe(unit, u, &unit_remove_lh, list)
120 list_for_each_entry_safe(unit, u, &unit_remove_lh, list) { 125 zfcp_device_unregister(&unit->sysfs_device,
121 if (unit->device) 126 &zfcp_sysfs_unit_attrs);
122 scsi_remove_device(unit->device);
123 zfcp_unit_dequeue(unit);
124 }
125 zfcp_port_dequeue(port);
126 }
127 wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
128 zfcp_adapter_dequeue(adapter);
129 127
130out: 128 list_for_each_entry_safe(port, p, &port_remove_lh, list)
131 mutex_unlock(&zfcp_data.config_mutex); 129 zfcp_device_unregister(&port->sysfs_device,
130 &zfcp_sysfs_port_attrs);
131
132 zfcp_adapter_unregister(adapter);
132} 133}
133 134
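
Note: zfcp_ccw_remove now detaches all units and ports onto private lists while the list locks are held and then unregisters them lock-free, instead of marking everything with REMOVE status bits. A stand-alone model of the splice-then-teardown pattern:

#include <stdio.h>
#include <stddef.h>

struct node {
        int id;
        struct node *next;
};

/* locked phase: move every node from *src onto *dst, emptying *src */
static void splice(struct node **src, struct node **dst)
{
        while (*src) {
                struct node *n = *src;

                *src = n->next;
                n->next = *dst;
                *dst = n;
        }
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *live = &a, *doomed = NULL;

        splice(&live, &doomed);                 /* under the list locks */
        for (; doomed; doomed = doomed->next)   /* lock-free teardown */
                printf("unregister %d\n", doomed->id);
        return 0;
}
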
134/** 135/**
135 * zfcp_ccw_set_online - set_online function of zfcp driver 136 * zfcp_ccw_set_online - set_online function of zfcp driver
136 * @ccw_device: pointer to belonging ccw device 137 * @cdev: pointer to belonging ccw device
137 * 138 *
138 * This function gets called by the common i/o layer and sets an 139 * This function gets called by the common i/o layer and sets an
139 * adapter into state online. The first call will allocate all 140 * adapter into state online. The first call will allocate all
@@ -144,23 +145,20 @@ out:
144 * the SCSI stack, that the QDIO queues will be set up and that the 145 * the SCSI stack, that the QDIO queues will be set up and that the
145 * adapter will be opened. 146 * adapter will be opened.
146 */ 147 */
147static int zfcp_ccw_set_online(struct ccw_device *ccw_device) 148static int zfcp_ccw_set_online(struct ccw_device *cdev)
148{ 149{
149 struct zfcp_adapter *adapter; 150 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
150 int ret = 0;
151
152 mutex_lock(&zfcp_data.config_mutex);
153 adapter = dev_get_drvdata(&ccw_device->dev);
154 151
155 if (!adapter) { 152 if (!adapter) {
156 ret = zfcp_adapter_enqueue(ccw_device); 153 adapter = zfcp_adapter_enqueue(cdev);
157 if (ret) { 154
158 dev_err(&ccw_device->dev, 155 if (IS_ERR(adapter)) {
156 dev_err(&cdev->dev,
159 "Setting up data structures for the " 157 "Setting up data structures for the "
160 "FCP adapter failed\n"); 158 "FCP adapter failed\n");
161 goto out; 159 return PTR_ERR(adapter);
162 } 160 }
163 adapter = dev_get_drvdata(&ccw_device->dev); 161 kref_get(&adapter->ref);
164 } 162 }
165 163
166 /* initialize request counter */ 164 /* initialize request counter */
@@ -172,62 +170,61 @@ static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
172 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 170 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
173 "ccsonl2", NULL); 171 "ccsonl2", NULL);
174 zfcp_erp_wait(adapter); 172 zfcp_erp_wait(adapter);
175out: 173
176 mutex_unlock(&zfcp_data.config_mutex); 174 flush_work(&adapter->scan_work);
177 if (!ret) 175
178 flush_work(&adapter->scan_work); 176 zfcp_ccw_adapter_put(adapter);
179 return ret; 177 return 0;
180} 178}
181 179
182/** 180/**
183 * zfcp_ccw_set_offline - set_offline function of zfcp driver 181 * zfcp_ccw_set_offline - set_offline function of zfcp driver
184 * @ccw_device: pointer to belonging ccw device 182 * @cdev: pointer to belonging ccw device
185 * 183 *
186 * This function gets called by the common i/o layer and sets an adapter 184 * This function gets called by the common i/o layer and sets an adapter
187 * into state offline. 185 * into state offline.
188 */ 186 */
189static int zfcp_ccw_set_offline(struct ccw_device *ccw_device) 187static int zfcp_ccw_set_offline(struct ccw_device *cdev)
190{ 188{
191 struct zfcp_adapter *adapter; 189 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
192 190
193 mutex_lock(&zfcp_data.config_mutex);
194 adapter = dev_get_drvdata(&ccw_device->dev);
195 if (!adapter) 191 if (!adapter)
196 goto out; 192 return 0;
197 193
198 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL); 194 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
199 zfcp_erp_wait(adapter); 195 zfcp_erp_wait(adapter);
200 mutex_unlock(&zfcp_data.config_mutex); 196
201out: 197 zfcp_ccw_adapter_put(adapter);
202 return 0; 198 return 0;
203} 199}
204 200
205/** 201/**
206 * zfcp_ccw_notify - ccw notify function 202 * zfcp_ccw_notify - ccw notify function
207 * @ccw_device: pointer to belonging ccw device 203 * @cdev: pointer to belonging ccw device
208 * @event: indicates if adapter was detached or attached 204 * @event: indicates if adapter was detached or attached
209 * 205 *
210 * This function gets called by the common i/o layer if an adapter has gone 206 * This function gets called by the common i/o layer if an adapter has gone
211 * or reappeared. 207 * or reappeared.
212 */ 208 */
213static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) 209static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
214{ 210{
215 struct zfcp_adapter *adapter = dev_get_drvdata(&ccw_device->dev); 211 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
212
213 if (!adapter)
214 return 1;
216 215
217 switch (event) { 216 switch (event) {
218 case CIO_GONE: 217 case CIO_GONE:
219 dev_warn(&adapter->ccw_device->dev, 218 dev_warn(&cdev->dev, "The FCP device has been detached\n");
220 "The FCP device has been detached\n");
221 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL); 219 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1", NULL);
222 break; 220 break;
223 case CIO_NO_PATH: 221 case CIO_NO_PATH:
224 dev_warn(&adapter->ccw_device->dev, 222 dev_warn(&cdev->dev,
225 "The CHPID for the FCP device is offline\n"); 223 "The CHPID for the FCP device is offline\n");
226 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL); 224 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2", NULL);
227 break; 225 break;
228 case CIO_OPER: 226 case CIO_OPER:
229 dev_info(&adapter->ccw_device->dev, 227 dev_info(&cdev->dev, "The FCP device is operational again\n");
230 "The FCP device is operational again\n");
231 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL, 228 zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
232 ZFCP_STATUS_COMMON_RUNNING, 229 ZFCP_STATUS_COMMON_RUNNING,
233 ZFCP_SET); 230 ZFCP_SET);
@@ -235,11 +232,13 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
235 "ccnoti4", NULL); 232 "ccnoti4", NULL);
236 break; 233 break;
237 case CIO_BOXED: 234 case CIO_BOXED:
238 dev_warn(&adapter->ccw_device->dev, "The FCP device " 235 dev_warn(&cdev->dev, "The FCP device did not respond within "
239 "did not respond within the specified time\n"); 236 "the specified time\n");
240 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); 237 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
241 break; 238 break;
242 } 239 }
240
241 zfcp_ccw_adapter_put(adapter);
243 return 1; 242 return 1;
244} 243}
245 244
@@ -249,17 +248,19 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
  */
 static void zfcp_ccw_shutdown(struct ccw_device *cdev)
 {
-	struct zfcp_adapter *adapter;
+	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	if (!adapter)
+		return;
 
-	mutex_lock(&zfcp_data.config_mutex);
-	adapter = dev_get_drvdata(&cdev->dev);
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
 	zfcp_erp_wait(adapter);
 	zfcp_erp_thread_kill(adapter);
-	mutex_unlock(&zfcp_data.config_mutex);
+
+	zfcp_ccw_adapter_put(adapter);
 }
 
-static struct ccw_driver zfcp_ccw_driver = {
+struct ccw_driver zfcp_ccw_driver = {
 	.owner = THIS_MODULE,
 	.name = "zfcp",
 	.ids = zfcp_ccw_device_id,
@@ -269,35 +270,7 @@ static struct ccw_driver zfcp_ccw_driver = {
 	.set_offline = zfcp_ccw_set_offline,
 	.notify = zfcp_ccw_notify,
 	.shutdown = zfcp_ccw_shutdown,
-	.freeze = zfcp_ccw_suspend,
+	.freeze = zfcp_ccw_set_offline,
 	.thaw = zfcp_ccw_activate,
 	.restore = zfcp_ccw_activate,
 };
-
-/**
- * zfcp_ccw_register - ccw register function
- *
- * Registers the driver at the common i/o layer. This function will be called
- * at module load time/system start.
- */
-int __init zfcp_ccw_register(void)
-{
-	return ccw_driver_register(&zfcp_ccw_driver);
-}
-
-/**
- * zfcp_get_adapter_by_busid - find zfcp_adapter struct
- * @busid: bus id string of zfcp adapter to find
- */
-struct zfcp_adapter *zfcp_get_adapter_by_busid(char *busid)
-{
-	struct ccw_device *ccw_device;
-	struct zfcp_adapter *adapter = NULL;
-
-	ccw_device = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
-	if (ccw_device) {
-		adapter = dev_get_drvdata(&ccw_device->dev);
-		put_device(&ccw_device->dev);
-	}
-	return adapter;
-}
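The lookup/put pairing that replaces zfcp_get_adapter_by_busid() above follows a common kernel pattern: a successful lookup returns an object whose reference count has already been raised, and every caller owes exactly one matching put. A minimal userspace sketch of that pattern (illustrative names only, not the driver's API):

/* Sketch: lookup returns a referenced object or NULL; pair each
 * successful lookup with exactly one put. */
#include <stdatomic.h>
#include <stdlib.h>

struct adapter {
	atomic_int ref;		/* reference count */
	int online;		/* example payload */
};

static struct adapter *registered;	/* set while the device is bound */

static struct adapter *adapter_get(void)
{
	struct adapter *a = registered;

	if (!a)
		return NULL;		/* device already gone */
	atomic_fetch_add(&a->ref, 1);
	return a;
}

static void adapter_put(struct adapter *a)
{
	if (atomic_fetch_sub(&a->ref, 1) == 1)
		free(a);		/* last reference dropped */
}

int main(void)
{
	registered = calloc(1, sizeof(*registered));
	atomic_init(&registered->ref, 1);	/* registration holds one ref */

	struct adapter *a = adapter_get();	/* like zfcp_ccw_adapter_by_cdev() */
	if (a) {
		a->online = 1;			/* ... use the adapter ... */
		adapter_put(a);			/* like zfcp_ccw_adapter_put() */
	}
	adapter_put(registered);		/* unregistration drops its ref */
	return 0;
}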
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 8305c874e86f..f932400e980a 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -86,8 +86,18 @@ static int zfcp_cfdc_copy_to_user(void __user *user_buffer,
 static struct zfcp_adapter *zfcp_cfdc_get_adapter(u32 devno)
 {
 	char busid[9];
+	struct ccw_device *cdev;
+	struct zfcp_adapter *adapter;
+
 	snprintf(busid, sizeof(busid), "0.0.%04x", devno);
-	return zfcp_get_adapter_by_busid(busid);
+	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
+	if (!cdev)
+		return NULL;
+
+	adapter = zfcp_ccw_adapter_by_cdev(cdev);
+
+	put_device(&cdev->dev);
+	return adapter;
 }
 
 static int zfcp_cfdc_set_fsf(struct zfcp_fsf_cfdc *fsf_cfdc, int command)
@@ -197,7 +207,6 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
 		retval = -ENXIO;
 		goto free_buffer;
 	}
-	zfcp_adapter_get(adapter);
 
 	retval = zfcp_cfdc_sg_setup(data->command, fsf_cfdc->sg,
 				    data_user->control_file);
@@ -230,7 +239,7 @@ static long zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
 free_sg:
 	zfcp_sg_free_table(fsf_cfdc->sg, ZFCP_CFDC_PAGES);
 adapter_put:
-	zfcp_adapter_put(adapter);
+	zfcp_ccw_adapter_put(adapter);
 free_buffer:
 	kfree(data);
 no_mem_sense:
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 215b70749e95..84450955ae11 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -13,6 +13,7 @@
 #include <asm/debug.h>
 #include "zfcp_dbf.h"
 #include "zfcp_ext.h"
+#include "zfcp_fc.h"
 
 static u32 dbfsize = 4;
 
@@ -177,8 +178,7 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
 
 	case FSF_QTCB_SEND_ELS:
 		send_els = (struct zfcp_send_els *)fsf_req->data;
-		response->u.els.d_id = qtcb->bottom.support.d_id;
-		response->u.els.ls_code = send_els->ls_code >> 24;
+		response->u.els.d_id = ntoh24(qtcb->bottom.support.d_id);
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -348,7 +348,6 @@ static void zfcp_dbf_hba_view_response(char **p,
 
 	case FSF_QTCB_SEND_ELS:
 		zfcp_dbf_out(p, "d_id", "0x%06x", r->u.els.d_id);
-		zfcp_dbf_out(p, "ls_code", "0x%02x", r->u.els.ls_code);
 		break;
 
 	case FSF_QTCB_ABORT_FCP_CMND:
@@ -677,14 +676,14 @@ void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
 /**
  * zfcp_dbf_san_ct_request - trace event for issued CT request
  * @fsf_req: request containing issued CT data
+ * @d_id: destination id where ct request is sent to
  */
-void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req, u32 d_id)
 {
-	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct zfcp_dbf *dbf = adapter->dbf;
-	struct ct_hdr *hdr = sg_virt(ct->req);
+	struct fc_ct_hdr *hdr = sg_virt(ct->req);
 	struct zfcp_dbf_san_record *r = &dbf->san_buf;
 	struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
 	int level = 3;
@@ -695,19 +694,18 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
-	r->s_id = fc_host_port_id(adapter->scsi_host);
-	r->d_id = wka_port->d_id;
-	oct->cmd_req_code = hdr->cmd_rsp_code;
-	oct->revision = hdr->revision;
-	oct->gs_type = hdr->gs_type;
-	oct->gs_subtype = hdr->gs_subtype;
-	oct->options = hdr->options;
-	oct->max_res_size = hdr->max_res_size;
-	oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
+	oct->d_id = d_id;
+	oct->cmd_req_code = hdr->ct_cmd;
+	oct->revision = hdr->ct_rev;
+	oct->gs_type = hdr->ct_fs_type;
+	oct->gs_subtype = hdr->ct_fs_subtype;
+	oct->options = hdr->ct_options;
+	oct->max_res_size = hdr->ct_mr_size;
+	oct->len = min((int)ct->req->length - (int)sizeof(struct fc_ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
 	debug_event(dbf->san, level, r, sizeof(*r));
 	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct ct_hdr), oct->len);
+			 (void *)hdr + sizeof(struct fc_ct_hdr), oct->len);
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
@@ -717,10 +715,9 @@ void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
-	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
-	struct ct_hdr *hdr = sg_virt(ct->resp);
+	struct zfcp_fsf_ct_els *ct = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	struct zfcp_adapter *adapter = fsf_req->adapter;
+	struct fc_ct_hdr *hdr = sg_virt(ct->resp);
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_san_record *r = &dbf->san_buf;
 	struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
@@ -732,25 +729,23 @@ void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
 	r->fsf_seqno = fsf_req->seq_no;
-	r->s_id = wka_port->d_id;
-	r->d_id = fc_host_port_id(adapter->scsi_host);
-	rct->cmd_rsp_code = hdr->cmd_rsp_code;
-	rct->revision = hdr->revision;
-	rct->reason_code = hdr->reason_code;
-	rct->expl = hdr->reason_code_expl;
-	rct->vendor_unique = hdr->vendor_unique;
-	rct->max_res_size = hdr->max_res_size;
-	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
+	rct->cmd_rsp_code = hdr->ct_cmd;
+	rct->revision = hdr->ct_rev;
+	rct->reason_code = hdr->ct_reason;
+	rct->expl = hdr->ct_explan;
+	rct->vendor_unique = hdr->ct_vendor;
+	rct->max_res_size = hdr->ct_mr_size;
+	rct->len = min((int)ct->resp->length - (int)sizeof(struct fc_ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
 	debug_event(dbf->san, level, r, sizeof(*r));
 	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
-			 (void *)hdr + sizeof(struct ct_hdr), rct->len);
+			 (void *)hdr + sizeof(struct fc_ct_hdr), rct->len);
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 static void zfcp_dbf_san_els(const char *tag, int level,
-			     struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id,
-			     u8 ls_code, void *buffer, int buflen)
+			     struct zfcp_fsf_req *fsf_req, u32 d_id,
+			     void *buffer, int buflen)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct zfcp_dbf *dbf = adapter->dbf;
@@ -762,9 +757,7 @@ static void zfcp_dbf_san_els(const char *tag, int level,
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
 	rec->fsf_reqid = fsf_req->req_id;
 	rec->fsf_seqno = fsf_req->seq_no;
-	rec->s_id = s_id;
-	rec->d_id = d_id;
-	rec->u.els.ls_code = ls_code;
+	rec->u.els.d_id = d_id;
 	debug_event(dbf->san, level, rec, sizeof(*rec));
 	zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
 			 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
@@ -777,12 +770,11 @@ static void zfcp_dbf_san_els(const char *tag, int level,
  */
 void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
 
-	zfcp_dbf_san_els("oels", 2, fsf_req,
-			 fc_host_port_id(els->adapter->scsi_host),
-			 els->d_id, *(u8 *) sg_virt(els->req),
-			 sg_virt(els->req), els->req->length);
+	zfcp_dbf_san_els("oels", 2, fsf_req, d_id,
+			 sg_virt(els->req), els->req->length);
 }
 
 /**
@@ -791,12 +783,11 @@ void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
+	struct zfcp_fsf_ct_els *els = (struct zfcp_fsf_ct_els *)fsf_req->data;
+	u32 d_id = ntoh24(fsf_req->qtcb->bottom.support.d_id);
 
-	zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id,
-			 fc_host_port_id(els->adapter->scsi_host),
-			 *(u8 *)sg_virt(els->req), sg_virt(els->resp),
-			 els->resp->length);
+	zfcp_dbf_san_els("rels", 2, fsf_req, d_id,
+			 sg_virt(els->resp), els->resp->length);
 }
 
 /**
@@ -805,16 +796,13 @@ void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
  */
 void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
-	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_status_read_buffer *buf =
 			(struct fsf_status_read_buffer *)fsf_req->data;
 	int length = (int)buf->length -
 		     (int)((void *)&buf->payload - (void *)buf);
 
-	zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id,
-			 fc_host_port_id(adapter->scsi_host),
-			 buf->payload.data[0], (void *)buf->payload.data,
-			 length);
+	zfcp_dbf_san_els("iels", 1, fsf_req, ntoh24(buf->d_id),
+			 (void *)buf->payload.data, length);
 }
 
 static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
@@ -829,11 +817,10 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
 	zfcp_dbf_tag(&p, "tag", r->tag);
 	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
 	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	zfcp_dbf_out(&p, "s_id", "0x%06x", r->s_id);
-	zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
 
 	if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
 		struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
+		zfcp_dbf_out(&p, "d_id", "0x%06x", ct->d_id);
 		zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
 		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
 		zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -852,7 +839,7 @@ static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
 	    strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
 	    strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
 		struct zfcp_dbf_san_record_els *els = &r->u.els;
-		zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
+		zfcp_dbf_out(&p, "d_id", "0x%06x", els->d_id);
 	}
 	return p - out_buf;
 }
@@ -870,8 +857,9 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 	struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
 	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
 	unsigned long flags;
-	struct fcp_rsp_iu *fcp_rsp;
-	char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
+	struct fcp_resp_with_ext *fcp_rsp;
+	struct fcp_resp_rsp_info *fcp_rsp_info = NULL;
+	char *fcp_sns_info = NULL;
 	int offset = 0, buflen = 0;
 
 	spin_lock_irqsave(&dbf->scsi_lock, flags);
@@ -895,20 +883,22 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 		rec->scsi_allowed = scsi_cmnd->allowed;
 	}
 	if (fsf_req != NULL) {
-		fcp_rsp = (struct fcp_rsp_iu *)
+		fcp_rsp = (struct fcp_resp_with_ext *)
 			&(fsf_req->qtcb->bottom.io.fcp_rsp);
-		fcp_rsp_info = (unsigned char *) &fcp_rsp[1];
-		fcp_sns_info =
-			zfcp_get_fcp_sns_info_ptr(fcp_rsp);
-
-		rec->rsp_validity = fcp_rsp->validity.value;
-		rec->rsp_scsi_status = fcp_rsp->scsi_status;
-		rec->rsp_resid = fcp_rsp->fcp_resid;
-		if (fcp_rsp->validity.bits.fcp_rsp_len_valid)
-			rec->rsp_code = *(fcp_rsp_info + 3);
-		if (fcp_rsp->validity.bits.fcp_sns_len_valid) {
-			buflen = min((int)fcp_rsp->fcp_sns_len,
-				     ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
+		fcp_rsp_info = (struct fcp_resp_rsp_info *)
+			&fcp_rsp[1];
+		fcp_sns_info = (char *) &fcp_rsp[1];
+		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+			fcp_sns_info += fcp_rsp->ext.fr_sns_len;
+
+		rec->rsp_validity = fcp_rsp->resp.fr_flags;
+		rec->rsp_scsi_status = fcp_rsp->resp.fr_status;
+		rec->rsp_resid = fcp_rsp->ext.fr_resid;
+		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL)
+			rec->rsp_code = fcp_rsp_info->rsp_code;
+		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+			buflen = min(fcp_rsp->ext.fr_sns_len,
+				     (u32)ZFCP_DBF_SCSI_MAX_FCP_SNS_INFO);
 			rec->sns_info_len = buflen;
 			memcpy(rec->sns_info, fcp_sns_info,
 			       min(buflen,
@@ -1067,6 +1057,8 @@ err_out:
  */
 void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
 {
+	if (!dbf)
+		return;
 	debug_unregister(dbf->scsi);
 	debug_unregister(dbf->san);
 	debug_unregister(dbf->hba);
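The trace code above now funnels Fibre Channel destination IDs through ntoh24(), since a D_ID travels as a 3-byte big-endian quantity on the wire. A standalone sketch of the conversion (the kernel supplies ntoh24() in its FC headers; this reimplementation only mirrors the assumed semantics):

/* Sketch: convert a 3-byte big-endian FC address to a host u32. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t ntoh24(const uint8_t p[3])
{
	/* most significant byte first, as transmitted on the wire */
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	uint8_t wire_d_id[3] = { 0xff, 0xff, 0xfc };	/* directory service */

	printf("d_id = 0x%06x\n", (unsigned)ntoh24(wire_d_id));	/* 0xfffffc */
	return 0;
}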
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 6b1461e8f847..8b7fd9a1033e 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -22,6 +22,7 @@
 #ifndef ZFCP_DBF_H
 #define ZFCP_DBF_H
 
+#include <scsi/fc/fc_fcp.h>
 #include "zfcp_ext.h"
 #include "zfcp_fsf.h"
 #include "zfcp_def.h"
@@ -122,7 +123,6 @@ struct zfcp_dbf_hba_record_response {
 		} unit;
 		struct {
 			u32 d_id;
-			u8 ls_code;
 		} els;
 	} u;
 } __attribute__ ((packed));
@@ -166,6 +166,7 @@ struct zfcp_dbf_san_record_ct_request {
 	u8 options;
 	u16 max_res_size;
 	u32 len;
+	u32 d_id;
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record_ct_response {
@@ -179,16 +180,13 @@ struct zfcp_dbf_san_record_ct_response {
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record_els {
-	u8 ls_code;
-	u32 len;
+	u32 d_id;
 } __attribute__ ((packed));
 
 struct zfcp_dbf_san_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u64 fsf_reqid;
 	u32 fsf_seqno;
-	u32 s_id;
-	u32 d_id;
 	union {
 		struct zfcp_dbf_san_record_ct_request ct_req;
 		struct zfcp_dbf_san_record_ct_response ct_resp;
@@ -343,7 +341,7 @@ static inline
 void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
 			    struct scsi_cmnd *scsi_cmnd)
 {
-	zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
+	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
 		      unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
 }
 
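With s_id and d_id gone from the shared record header, each record type now carries its own d_id inside the union, selected by the record tag. A simplified sketch of that tagged-union layout (names are stand-ins, not the driver's structures):

/* Sketch: shared header fields stay in the outer struct; type-specific
 * fields live in the union member chosen by the tag. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TAG_SIZE 4

struct san_record {
	char tag[TAG_SIZE];	/* "octc", "oels", ... selects the member */
	uint64_t fsf_reqid;
	union {
		struct { uint32_t d_id; uint16_t cmd_req_code; } ct_req;
		struct { uint32_t d_id; } els;
	} u;
};

int main(void)
{
	struct san_record r;

	memcpy(r.tag, "oels", TAG_SIZE);
	r.fsf_reqid = 0x1234;
	r.u.els.d_id = 0xfffffc;

	if (strncmp(r.tag, "oels", TAG_SIZE) == 0)	/* like the view format code */
		printf("els d_id = 0x%06x\n", (unsigned)r.u.els.d_id);
	return 0;
}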
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 7da2fad8f515..e1b5b88e2ddb 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -71,131 +71,6 @@
 /* timeout value for "default timer" for fsf requests */
 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
 
-/*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
-
-/* task attribute values in FCP-2 FCP_CMND IU */
-#define SIMPLE_Q 0
-#define HEAD_OF_Q 1
-#define ORDERED_Q 2
-#define ACA_Q 4
-#define UNTAGGED 5
-
-/* task management flags in FCP-2 FCP_CMND IU */
-#define FCP_CLEAR_ACA 0x40
-#define FCP_TARGET_RESET 0x20
-#define FCP_LOGICAL_UNIT_RESET 0x10
-#define FCP_CLEAR_TASK_SET 0x04
-#define FCP_ABORT_TASK_SET 0x02
-
-#define FCP_CDB_LENGTH 16
-
-#define ZFCP_DID_MASK 0x00FFFFFF
-
-/* FCP(-2) FCP_CMND IU */
-struct fcp_cmnd_iu {
-	u64 fcp_lun;	/* FCP logical unit number */
-	u8 crn;	/* command reference number */
-	u8 reserved0:5;	/* reserved */
-	u8 task_attribute:3;	/* task attribute */
-	u8 task_management_flags;	/* task management flags */
-	u8 add_fcp_cdb_length:6;	/* additional FCP_CDB length */
-	u8 rddata:1;	/* read data */
-	u8 wddata:1;	/* write data */
-	u8 fcp_cdb[FCP_CDB_LENGTH];
-} __attribute__((packed));
-
-/* FCP(-2) FCP_RSP IU */
-struct fcp_rsp_iu {
-	u8 reserved0[10];
-	union {
-		struct {
-			u8 reserved1:3;
-			u8 fcp_conf_req:1;
-			u8 fcp_resid_under:1;
-			u8 fcp_resid_over:1;
-			u8 fcp_sns_len_valid:1;
-			u8 fcp_rsp_len_valid:1;
-		} bits;
-		u8 value;
-	} validity;
-	u8 scsi_status;
-	u32 fcp_resid;
-	u32 fcp_sns_len;
-	u32 fcp_rsp_len;
-} __attribute__((packed));
-
-
-#define RSP_CODE_GOOD 0
-#define RSP_CODE_LENGTH_MISMATCH 1
-#define RSP_CODE_FIELD_INVALID 2
-#define RSP_CODE_RO_MISMATCH 3
-#define RSP_CODE_TASKMAN_UNSUPP 4
-#define RSP_CODE_TASKMAN_FAILED 5
-
-/* see fc-fs */
-#define LS_RSCN 0x61
-#define LS_LOGO 0x05
-#define LS_PLOGI 0x03
-
-struct fcp_rscn_head {
-	u8 command;
-	u8 page_length; /* always 0x04 */
-	u16 payload_len;
-} __attribute__((packed));
-
-struct fcp_rscn_element {
-	u8 reserved:2;
-	u8 event_qual:4;
-	u8 addr_format:2;
-	u32 nport_did:24;
-} __attribute__((packed));
-
-/* see fc-ph */
-struct fcp_logo {
-	u32 command;
-	u32 nport_did;
-	u64 nport_wwpn;
-} __attribute__((packed));
-
-/*
- * FC-FS stuff
- */
-#define R_A_TOV 10 /* seconds */
-
-#define ZFCP_LS_RLS 0x0f
-#define ZFCP_LS_ADISC 0x52
-#define ZFCP_LS_RPS 0x56
-#define ZFCP_LS_RSCN 0x61
-#define ZFCP_LS_RNID 0x78
-
-struct zfcp_ls_adisc {
-	u8 code;
-	u8 field[3];
-	u32 hard_nport_id;
-	u64 wwpn;
-	u64 wwnn;
-	u32 nport_id;
-} __attribute__ ((packed));
-
-/*
- * FC-GS-2 stuff
- */
-#define ZFCP_CT_REVISION 0x01
-#define ZFCP_CT_DIRECTORY_SERVICE 0xFC
-#define ZFCP_CT_NAME_SERVER 0x02
-#define ZFCP_CT_SYNCHRONOUS 0x00
-#define ZFCP_CT_SCSI_FCP 0x08
-#define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
-#define ZFCP_CT_GID_PN 0x0121
-#define ZFCP_CT_GPN_FT 0x0172
-#define ZFCP_CT_ACCEPT 0x8002
-#define ZFCP_CT_REJECT 0x8001
-
-/*
- * FC-GS-4 stuff
- */
-#define ZFCP_CT_TIMEOUT (3 * R_A_TOV)
-
 /*************** ADAPTER/PORT/UNIT AND FSF_REQ STATUS FLAGS ******************/
 
 /*
@@ -205,7 +80,6 @@ struct zfcp_ls_adisc {
 #define ZFCP_COMMON_FLAGS 0xfff00000
 
 /* common status bits */
-#define ZFCP_STATUS_COMMON_REMOVE 0x80000000
 #define ZFCP_STATUS_COMMON_RUNNING 0x40000000
 #define ZFCP_STATUS_COMMON_ERP_FAILED 0x20000000
 #define ZFCP_STATUS_COMMON_UNBLOCKED 0x10000000
@@ -222,21 +96,10 @@ struct zfcp_ls_adisc {
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
 
-/* FC-PH/FC-GS well-known address identifiers for generic services */
-#define ZFCP_DID_WKA 0xFFFFF0
-
 /* remote port status */
 #define ZFCP_STATUS_PORT_PHYS_OPEN 0x00000001
 #define ZFCP_STATUS_PORT_LINK_TEST 0x00000002
 
-/* well known address (WKA) port status*/
-enum zfcp_wka_status {
-	ZFCP_WKA_PORT_OFFLINE,
-	ZFCP_WKA_PORT_CLOSING,
-	ZFCP_WKA_PORT_OPENING,
-	ZFCP_WKA_PORT_ONLINE,
-};
-
 /* logical unit status */
 #define ZFCP_STATUS_UNIT_SHARED 0x00000004
 #define ZFCP_STATUS_UNIT_READONLY 0x00000008
@@ -247,10 +110,7 @@ enum zfcp_wka_status {
 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
 #define ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED 0x00000080
-#define ZFCP_STATUS_FSFREQ_ABORTED 0x00000100
 #define ZFCP_STATUS_FSFREQ_TMFUNCFAILED 0x00000200
-#define ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP 0x00000400
-#define ZFCP_STATUS_FSFREQ_RETRY 0x00000800
 #define ZFCP_STATUS_FSFREQ_DISMISSED 0x00001000
 
 /************************* STRUCTURE DEFINITIONS *****************************/
@@ -265,125 +125,10 @@ struct zfcp_adapter_mempool {
 	mempool_t *scsi_abort;
 	mempool_t *status_read_req;
 	mempool_t *status_read_data;
-	mempool_t *gid_pn_data;
+	mempool_t *gid_pn;
 	mempool_t *qtcb_pool;
 };
 
-/*
- * header for CT_IU
- */
-struct ct_hdr {
-	u8 revision;		// 0x01
-	u8 in_id[3];		// 0x00
-	u8 gs_type;		// 0xFC Directory Service
-	u8 gs_subtype;		// 0x02 Name Server
-	u8 options;		// 0x00 single bidirectional exchange
-	u8 reserved0;
-	u16 cmd_rsp_code;	// 0x0121 GID_PN, or 0x0100 GA_NXT
-	u16 max_res_size;	// <= (4096 - 16) / 4
-	u8 reserved1;
-	u8 reason_code;
-	u8 reason_code_expl;
-	u8 vendor_unique;
-} __attribute__ ((packed));
-
-/* nameserver request CT_IU -- for requests where
- * a port name is required */
-struct ct_iu_gid_pn_req {
-	struct ct_hdr header;
-	u64 wwpn;
-} __attribute__ ((packed));
-
-/* FS_ACC IU and data unit for GID_PN nameserver request */
-struct ct_iu_gid_pn_resp {
-	struct ct_hdr header;
-	u32 d_id;
-} __attribute__ ((packed));
-
-struct ct_iu_gpn_ft_req {
-	struct ct_hdr header;
-	u8 flags;
-	u8 domain_id_scope;
-	u8 area_id_scope;
-	u8 fc4_type;
-} __attribute__ ((packed));
-
-
-/**
- * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
- * @wka_port: port where the request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_ct {
-	struct zfcp_wka_port *wka_port;
-	struct scatterlist *req;
-	struct scatterlist *resp;
-	void (*handler)(unsigned long);
-	unsigned long handler_data;
-	struct completion *completion;
-	int status;
-};
-
-/* used for name server requests in error recovery */
-struct zfcp_gid_pn_data {
-	struct zfcp_send_ct ct;
-	struct scatterlist req;
-	struct scatterlist resp;
-	struct ct_iu_gid_pn_req ct_iu_req;
-	struct ct_iu_gid_pn_resp ct_iu_resp;
-	struct zfcp_port *port;
-};
-
-/**
- * struct zfcp_send_els - used to pass parameters to function zfcp_fsf_send_els
- * @adapter: adapter where request is sent from
- * @port: port where ELS is destinated (port reference count has to be increased)
- * @d_id: destiniation id of port where request is sent to
- * @req: scatter-gather list for request
- * @resp: scatter-gather list for response
- * @handler: handler function (called for response to the request)
- * @handler_data: data passed to handler function
- * @completion: completion for synchronization purposes
- * @ls_code: hex code of ELS command
- * @status: used to pass error status to calling function
- */
-struct zfcp_send_els {
-	struct zfcp_adapter *adapter;
-	struct zfcp_port *port;
-	u32 d_id;
-	struct scatterlist *req;
-	struct scatterlist *resp;
-	void (*handler)(unsigned long);
-	unsigned long handler_data;
-	struct completion *completion;
-	int ls_code;
-	int status;
-};
-
-struct zfcp_wka_port {
-	struct zfcp_adapter *adapter;
-	wait_queue_head_t completion_wq;
-	enum zfcp_wka_status status;
-	atomic_t refcount;
-	u32 d_id;
-	u32 handle;
-	struct mutex mutex;
-	struct delayed_work work;
-};
-
-struct zfcp_wka_ports {
-	struct zfcp_wka_port ms;	/* management service */
-	struct zfcp_wka_port ts;	/* time service */
-	struct zfcp_wka_port ds;	/* directory service */
-	struct zfcp_wka_port as;	/* alias service */
-	struct zfcp_wka_port ks;	/* key distribution service */
-};
-
 struct zfcp_qdio_queue {
 	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
 	u8 first;	/* index of next free bfr in queue */
@@ -446,9 +191,7 @@ struct zfcp_qdio {
 };
 
 struct zfcp_adapter {
-	atomic_t refcount;	/* reference count */
-	wait_queue_head_t remove_wq;	/* can be used to wait for
-					   refcount drop to zero */
+	struct kref ref;
 	u64 peer_wwnn;	/* P2P peer WWNN */
 	u64 peer_wwpn;	/* P2P peer WWPN */
 	u32 peer_d_id;	/* P2P peer D_ID */
@@ -461,7 +204,8 @@ struct zfcp_adapter {
 	u32 hardware_version;	/* of FCP channel */
 	u16 timer_ticks;	/* time int for a tick */
 	struct Scsi_Host *scsi_host;	/* Pointer to mid-layer */
-	struct list_head port_list_head;	/* remote port list */
+	struct list_head port_list;	/* remote port list */
+	rwlock_t port_list_lock;	/* port list lock */
 	unsigned long req_no;	/* unique FSF req number */
 	struct list_head *req_list;	/* list of pending reqs */
 	spinlock_t req_list_lock;	/* request list lock */
@@ -485,7 +229,7 @@ struct zfcp_adapter {
 	u32 erp_low_mem_count;	/* nr of erp actions waiting
 				   for memory */
 	struct task_struct *erp_thread;
-	struct zfcp_wka_ports *gs;	/* generic services */
+	struct zfcp_fc_wka_ports *gs;	/* generic services */
 	struct zfcp_dbf *dbf;	/* debug traces */
 	struct zfcp_adapter_mempool pool;	/* Adapter memory pools */
 	struct fc_host_statistics *fc_stats;
@@ -500,11 +244,9 @@ struct zfcp_port {
 	struct device sysfs_device;	/* sysfs device */
 	struct fc_rport *rport;	/* rport of fc transport class */
 	struct list_head list;	/* list of remote ports */
-	atomic_t refcount;	/* reference count */
-	wait_queue_head_t remove_wq;	/* can be used to wait for
-					   refcount drop to zero */
 	struct zfcp_adapter *adapter;	/* adapter used to access port */
-	struct list_head unit_list_head;	/* head of logical unit list */
+	struct list_head unit_list;	/* head of logical unit list */
+	rwlock_t unit_list_lock;	/* unit list lock */
 	atomic_t status;	/* status of this remote port */
 	u64 wwnn;	/* WWNN if known */
 	u64 wwpn;	/* WWPN */
@@ -523,9 +265,6 @@ struct zfcp_port {
 struct zfcp_unit {
 	struct device sysfs_device;	/* sysfs device */
 	struct list_head list;	/* list of logical units */
-	atomic_t refcount;	/* reference count */
-	wait_queue_head_t remove_wq;	/* can be used to wait for
-					   refcount drop to zero */
 	struct zfcp_port *port;	/* remote port of unit */
 	atomic_t status;	/* status of this logical unit */
 	u64 fcp_lun;	/* own FCP_LUN */
@@ -601,14 +340,11 @@ struct zfcp_fsf_req {
 struct zfcp_data {
 	struct scsi_host_template scsi_host_template;
 	struct scsi_transport_template *scsi_transport_template;
-	rwlock_t config_lock;	/* serialises changes
-				   to adapter/port/unit
-				   lists */
-	struct mutex config_mutex;
 	struct kmem_cache *gpn_ft_cache;
 	struct kmem_cache *qtcb_cache;
 	struct kmem_cache *sr_buffer_cache;
 	struct kmem_cache *gid_pn_cache;
+	struct kmem_cache *adisc_cache;
 };
 
 /********************** ZFCP SPECIFIC DEFINES ********************************/
@@ -657,47 +393,4 @@ zfcp_reqlist_find_safe(struct zfcp_adapter *adapter, struct zfcp_fsf_req *req)
 	return NULL;
 }
 
-/*
- * functions needed for reference/usage counting
- */
-
-static inline void
-zfcp_unit_get(struct zfcp_unit *unit)
-{
-	atomic_inc(&unit->refcount);
-}
-
-static inline void
-zfcp_unit_put(struct zfcp_unit *unit)
-{
-	if (atomic_dec_return(&unit->refcount) == 0)
-		wake_up(&unit->remove_wq);
-}
-
-static inline void
-zfcp_port_get(struct zfcp_port *port)
-{
-	atomic_inc(&port->refcount);
-}
-
-static inline void
-zfcp_port_put(struct zfcp_port *port)
-{
-	if (atomic_dec_return(&port->refcount) == 0)
-		wake_up(&port->remove_wq);
-}
-
-static inline void
-zfcp_adapter_get(struct zfcp_adapter *adapter)
-{
-	atomic_inc(&adapter->refcount);
-}
-
-static inline void
-zfcp_adapter_put(struct zfcp_adapter *adapter)
-{
-	if (atomic_dec_return(&adapter->refcount) == 0)
-		wake_up(&adapter->remove_wq);
-}
-
 #endif /* ZFCP_DEF_H */
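The helpers removed above were open-coded reference counting with a wake-up on the final put; struct kref centralizes that pattern behind a release callback. A userspace sketch that mimics the assumed kref semantics (the kref_* names imitate <linux/kref.h>, but this is an illustration, not the kernel implementation):

/* Sketch: last kref_put() invokes a release callback instead of
 * waking a waiter on a remove_wq. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k) { atomic_fetch_add(&k->refcount, 1); }

static void kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);	/* last reference gone */
}

struct adapter { struct kref ref; };

static void adapter_release(struct kref *ref)
{
	/* the kernel would use container_of(); here ref is the first member */
	struct adapter *a = (struct adapter *)ref;
	free(a);
	puts("adapter released");
}

int main(void)
{
	struct adapter *a = calloc(1, sizeof(*a));

	kref_init(&a->ref);			/* allocation holds one reference */
	kref_get(&a->ref);			/* e.g. an ERP action pins it */
	kref_put(&a->ref, adapter_release);	/* ERP done */
	kref_put(&a->ref, adapter_release);	/* final put frees the object */
	return 0;
}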
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 73d366ba31e5..b51a11a82e63 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -99,9 +99,12 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else
-		list_for_each_entry(unit, &port->unit_list_head, list)
-			zfcp_erp_action_dismiss_unit(unit);
+	else {
+		read_lock(&port->unit_list_lock);
+		list_for_each_entry(unit, &port->unit_list, list)
+			zfcp_erp_action_dismiss_unit(unit);
+		read_unlock(&port->unit_list_lock);
+	}
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -110,9 +113,12 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&adapter->erp_action);
-	else
-		list_for_each_entry(port, &adapter->port_list_head, list)
+	else {
+		read_lock(&adapter->port_list_lock);
+		list_for_each_entry(port, &adapter->port_list, list)
 			zfcp_erp_action_dismiss_port(port);
+		read_unlock(&adapter->port_list_lock);
+	}
 }
 
 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
@@ -168,7 +174,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		zfcp_unit_get(unit);
+		if (!get_device(&unit->sysfs_device))
+			return NULL;
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
 		erp_action = &unit->erp_action;
 		if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
@@ -177,7 +184,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
-		zfcp_port_get(port);
+		if (!get_device(&port->sysfs_device))
+			return NULL;
 		zfcp_erp_action_dismiss_port(port);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
@@ -186,7 +194,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		zfcp_adapter_get(adapter);
+		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
@@ -264,11 +272,16 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
 {
 	unsigned long flags;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
-	_zfcp_erp_adapter_reopen(adapter, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	zfcp_erp_adapter_block(adapter, clear);
+	zfcp_scsi_schedule_rports_block(adapter);
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+	else
+		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+					NULL, NULL, id, ref);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 /**
@@ -345,11 +358,9 @@ void zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear, char *id,
 	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	_zfcp_erp_port_forced_reopen(port, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
@@ -377,15 +388,13 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
  */
 int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
 {
-	unsigned long flags;
 	int retval;
+	unsigned long flags;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	retval = _zfcp_erp_port_reopen(port, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	return retval;
 }
@@ -424,11 +433,9 @@ void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
 	struct zfcp_port *port = unit->port;
 	struct zfcp_adapter *adapter = port->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	_zfcp_erp_unit_reopen(unit, clear, id, ref);
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int status_change_set(unsigned long mask, atomic_t *status)
@@ -540,8 +547,10 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 {
 	struct zfcp_port *port;
 
-	list_for_each_entry(port, &adapter->port_list_head, list)
+	read_lock(&adapter->port_list_lock);
+	list_for_each_entry(port, &adapter->port_list, list)
 		_zfcp_erp_port_reopen(port, clear, id, ref);
+	read_unlock(&adapter->port_list_lock);
 }
 
 static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
@@ -549,8 +558,10 @@ static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
 {
 	struct zfcp_unit *unit;
 
-	list_for_each_entry(unit, &port->unit_list_head, list)
+	read_lock(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list)
 		_zfcp_erp_unit_reopen(unit, clear, id, ref);
+	read_unlock(&port->unit_list_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -590,16 +601,14 @@ static void zfcp_erp_wakeup(struct zfcp_adapter *adapter)
 {
 	unsigned long flags;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	read_lock(&adapter->erp_lock);
+	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
 			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 		wake_up(&adapter->erp_done_wqh);
 	}
-	read_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
@@ -858,10 +867,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 		if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP)
 			return zfcp_erp_open_ptp_port(act);
 		if (!port->d_id) {
-			zfcp_port_get(port);
-			if (!queue_work(adapter->work_queue,
-					&port->gid_pn_work))
-				zfcp_port_put(port);
+			zfcp_fc_trigger_did_lookup(port);
 			return ZFCP_ERP_EXIT;
 		}
 		return zfcp_erp_port_strategy_open_port(act);
@@ -869,12 +875,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 	case ZFCP_ERP_STEP_PORT_OPENING:
 		/* D_ID might have changed during open */
 		if (p_status & ZFCP_STATUS_COMMON_OPEN) {
-			if (port->d_id)
-				return ZFCP_ERP_SUCCEEDED;
-			else {
-				act->step = ZFCP_ERP_STEP_PORT_CLOSING;
-				return ZFCP_ERP_CONTINUES;
+			if (!port->d_id) {
+				zfcp_fc_trigger_did_lookup(port);
+				return ZFCP_ERP_EXIT;
 			}
+			return ZFCP_ERP_SUCCEEDED;
 		}
 		if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
 			port->d_id = 0;
@@ -889,19 +894,21 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action)
 {
 	struct zfcp_port *port = erp_action->port;
+	int p_status = atomic_read(&port->status);
 
-	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)
+	if ((p_status & ZFCP_STATUS_COMMON_NOESC) &&
+	    !(p_status & ZFCP_STATUS_COMMON_OPEN))
 		goto close_init_done;
 
 	switch (erp_action->step) {
 	case ZFCP_ERP_STEP_UNINITIALIZED:
 		zfcp_erp_port_strategy_clearstati(port);
-		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
 			return zfcp_erp_port_strategy_close(erp_action);
 		break;
 
 	case ZFCP_ERP_STEP_PORT_CLOSING:
-		if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)
+		if (p_status & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_FAILED;
 		break;
 	}
@@ -1172,28 +1179,28 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	switch (act->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
 		if ((result == ZFCP_ERP_SUCCEEDED) && !unit->device) {
-			zfcp_unit_get(unit);
+			get_device(&unit->sysfs_device);
 			if (scsi_queue_work(unit->port->adapter->scsi_host,
 					    &unit->scsi_work) <= 0)
-				zfcp_unit_put(unit);
+				put_device(&unit->sysfs_device);
 		}
-		zfcp_unit_put(unit);
+		put_device(&unit->sysfs_device);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		if (result == ZFCP_ERP_SUCCEEDED)
 			zfcp_scsi_schedule_rport_register(port);
-		zfcp_port_put(port);
+		put_device(&port->sysfs_device);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		if (result == ZFCP_ERP_SUCCEEDED) {
 			register_service_level(&adapter->service_level);
-			schedule_work(&adapter->scan_work);
+			queue_work(adapter->work_queue, &adapter->scan_work);
 		} else
 			unregister_service_level(&adapter->service_level);
-		zfcp_adapter_put(adapter);
+		kref_put(&adapter->ref, zfcp_adapter_release);
 		break;
 	}
 }
@@ -1216,12 +1223,12 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
 static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 {
 	int retval;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 	unsigned long flags;
+	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	kref_get(&adapter->ref);
 
+	write_lock_irqsave(&adapter->erp_lock, flags);
 	zfcp_erp_strategy_check_fsfreq(erp_action);
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED) {
@@ -1233,11 +1240,9 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 	zfcp_erp_action_to_running(erp_action);
 
 	/* no lock to allow for blocking operations */
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 	retval = zfcp_erp_strategy_do_action(erp_action);
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
-	write_lock(&adapter->erp_lock);
+	write_lock_irqsave(&adapter->erp_lock, flags);
 
 	if (erp_action->status & ZFCP_STATUS_ERP_DISMISSED)
 		retval = ZFCP_ERP_CONTINUES;
@@ -1275,12 +1280,12 @@ static int zfcp_erp_strategy(struct zfcp_erp_action *erp_action)
 		zfcp_erp_strategy_followup_failed(erp_action);
 
 unlock:
-	write_unlock(&adapter->erp_lock);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
 
 	if (retval != ZFCP_ERP_CONTINUES)
 		zfcp_erp_action_cleanup(erp_action, retval);
 
+	kref_put(&adapter->ref, zfcp_adapter_release);
 	return retval;
 }
 
@@ -1417,6 +1422,7 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
 				    void *ref, u32 mask, int set_or_clear)
 {
 	struct zfcp_port *port;
+	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
@@ -1431,10 +1437,13 @@ void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
 		atomic_set(&adapter->erp_counter, 0);
 	}
 
-	if (common_mask)
-		list_for_each_entry(port, &adapter->port_list_head, list)
+	if (common_mask) {
+		read_lock_irqsave(&adapter->port_list_lock, flags);
+		list_for_each_entry(port, &adapter->port_list, list)
 			zfcp_erp_modify_port_status(port, id, ref, common_mask,
 						    set_or_clear);
+		read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	}
 }
 
 /**
@@ -1451,6 +1460,7 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
 				 u32 mask, int set_or_clear)
 {
 	struct zfcp_unit *unit;
+	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 	if (set_or_clear == ZFCP_SET) {
@@ -1465,10 +1475,13 @@ void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
 		atomic_set(&port->erp_counter, 0);
 	}
 
-	if (common_mask)
-		list_for_each_entry(unit, &port->unit_list_head, list)
+	if (common_mask) {
+		read_lock_irqsave(&port->unit_list_lock, flags);
+		list_for_each_entry(unit, &port->unit_list, list)
 			zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
 						    set_or_clear);
+		read_unlock_irqrestore(&port->unit_list_lock, flags);
+	}
 }
 
 /**
@@ -1504,12 +1517,8 @@ void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
  */
 void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
 {
-	unsigned long flags;
-
-	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	zfcp_erp_modify_port_status(port, id, ref,
 				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
-	read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
 }
 
@@ -1537,13 +1546,9 @@ void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
1537 */ 1546 */
1538void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref) 1547void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
1539{ 1548{
1540 unsigned long flags;
1541
1542 read_lock_irqsave(&zfcp_data.config_lock, flags);
1543 zfcp_erp_modify_port_status(port, id, ref, 1549 zfcp_erp_modify_port_status(port, id, ref,
1544 ZFCP_STATUS_COMMON_ERP_FAILED | 1550 ZFCP_STATUS_COMMON_ERP_FAILED |
1545 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET); 1551 ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
1546 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
1547} 1552}
1548 1553
1549/** 1554/**
@@ -1576,12 +1581,15 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1576 void *ref) 1581 void *ref)
1577{ 1582{
1578 struct zfcp_unit *unit; 1583 struct zfcp_unit *unit;
1584 unsigned long flags;
1579 int status = atomic_read(&port->status); 1585 int status = atomic_read(&port->status);
1580 1586
1581 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED | 1587 if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
1582 ZFCP_STATUS_COMMON_ACCESS_BOXED))) { 1588 ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
1583 list_for_each_entry(unit, &port->unit_list_head, list) 1589 read_lock_irqsave(&port->unit_list_lock, flags);
1590 list_for_each_entry(unit, &port->unit_list, list)
1584 zfcp_erp_unit_access_changed(unit, id, ref); 1591 zfcp_erp_unit_access_changed(unit, id, ref);
1592 read_unlock_irqrestore(&port->unit_list_lock, flags);
1585 return; 1593 return;
1586 } 1594 }
1587 1595
@@ -1597,14 +1605,14 @@ static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
1597void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id, 1605void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
1598 void *ref) 1606 void *ref)
1599{ 1607{
1600 struct zfcp_port *port;
1601 unsigned long flags; 1608 unsigned long flags;
1609 struct zfcp_port *port;
1602 1610
1603 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) 1611 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
1604 return; 1612 return;
1605 1613
1606 read_lock_irqsave(&zfcp_data.config_lock, flags); 1614 read_lock_irqsave(&adapter->port_list_lock, flags);
1607 list_for_each_entry(port, &adapter->port_list_head, list) 1615 list_for_each_entry(port, &adapter->port_list, list)
1608 zfcp_erp_port_access_changed(port, id, ref); 1616 zfcp_erp_port_access_changed(port, id, ref);
1609 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 1617 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1610} 1618}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 36935bc0818f..03dec832b465 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -9,26 +9,31 @@
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
10#define ZFCP_EXT_H 10#define ZFCP_EXT_H
11 11
12#include <linux/types.h>
13#include <scsi/fc/fc_els.h>
12#include "zfcp_def.h" 14#include "zfcp_def.h"
15#include "zfcp_fc.h"
13 16
14/* zfcp_aux.c */ 17/* zfcp_aux.c */
15extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64); 18extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
16extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64); 19extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
17extern int zfcp_adapter_enqueue(struct ccw_device *); 20extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
18extern void zfcp_adapter_dequeue(struct zfcp_adapter *);
19extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32, 21extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
20 u32); 22 u32);
21extern void zfcp_port_dequeue(struct zfcp_port *);
22extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64); 23extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
23extern void zfcp_unit_dequeue(struct zfcp_unit *);
24extern int zfcp_reqlist_isempty(struct zfcp_adapter *); 24extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
25extern void zfcp_sg_free_table(struct scatterlist *, int); 25extern void zfcp_sg_free_table(struct scatterlist *, int);
26extern int zfcp_sg_setup_table(struct scatterlist *, int); 26extern int zfcp_sg_setup_table(struct scatterlist *, int);
27extern void zfcp_device_unregister(struct device *,
28 const struct attribute_group *);
29extern void zfcp_adapter_release(struct kref *);
30extern void zfcp_adapter_unregister(struct zfcp_adapter *);
27 31
28/* zfcp_ccw.c */ 32/* zfcp_ccw.c */
29extern int zfcp_ccw_register(void);
30extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); 33extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
31extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); 34extern struct ccw_driver zfcp_ccw_driver;
35extern struct zfcp_adapter *zfcp_ccw_adapter_by_cdev(struct ccw_device *);
36extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
32 37
33/* zfcp_cfdc.c */ 38/* zfcp_cfdc.c */
34extern struct miscdevice zfcp_cfdc_misc; 39extern struct miscdevice zfcp_cfdc_misc;
@@ -51,7 +56,7 @@ extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
51 struct fsf_status_read_buffer *); 56 struct fsf_status_read_buffer *);
52extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int); 57extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
53extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 58extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *); 59extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *, u32);
55extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *); 60extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
56extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *); 61extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
57extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *); 62extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
@@ -92,23 +97,22 @@ extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
92extern void zfcp_erp_timeout_handler(unsigned long); 97extern void zfcp_erp_timeout_handler(unsigned long);
93 98
94/* zfcp_fc.c */ 99/* zfcp_fc.c */
95extern int zfcp_fc_scan_ports(struct zfcp_adapter *); 100extern void zfcp_fc_scan_ports(struct work_struct *);
96extern void _zfcp_fc_scan_ports_later(struct work_struct *);
97extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); 101extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
98extern void zfcp_fc_port_did_lookup(struct work_struct *); 102extern void zfcp_fc_port_did_lookup(struct work_struct *);
99extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); 103extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *);
104extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fc_els_flogi *);
100extern void zfcp_fc_test_link(struct zfcp_port *); 105extern void zfcp_fc_test_link(struct zfcp_port *);
101extern void zfcp_fc_link_test_work(struct work_struct *); 106extern void zfcp_fc_link_test_work(struct work_struct *);
102extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *); 107extern void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *);
103extern int zfcp_fc_gs_setup(struct zfcp_adapter *); 108extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
104extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); 109extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
105extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *); 110extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
106extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
107 111
108/* zfcp_fsf.c */ 112/* zfcp_fsf.c */
109extern int zfcp_fsf_open_port(struct zfcp_erp_action *); 113extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
110extern int zfcp_fsf_open_wka_port(struct zfcp_wka_port *); 114extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
111extern int zfcp_fsf_close_wka_port(struct zfcp_wka_port *); 115extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
112extern int zfcp_fsf_close_port(struct zfcp_erp_action *); 116extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
113extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *); 117extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
114extern int zfcp_fsf_open_unit(struct zfcp_erp_action *); 118extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
@@ -124,8 +128,10 @@ extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
124extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *); 128extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
125extern int zfcp_fsf_status_read(struct zfcp_qdio *); 129extern int zfcp_fsf_status_read(struct zfcp_qdio *);
126extern int zfcp_status_read_refill(struct zfcp_adapter *adapter); 130extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
127extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *); 131extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
128extern int zfcp_fsf_send_els(struct zfcp_send_els *); 132 mempool_t *);
133extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
134 struct zfcp_fsf_ct_els *);
129extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *, 135extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
130 struct scsi_cmnd *); 136 struct scsi_cmnd *);
131extern void zfcp_fsf_req_free(struct zfcp_fsf_req *); 137extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
@@ -152,7 +158,6 @@ extern void zfcp_qdio_close(struct zfcp_qdio *);
152extern struct zfcp_data zfcp_data; 158extern struct zfcp_data zfcp_data;
153extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); 159extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
154extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 160extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
155extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
156extern struct fc_function_template zfcp_transport_functions; 161extern struct fc_function_template zfcp_transport_functions;
157extern void zfcp_scsi_rport_work(struct work_struct *); 162extern void zfcp_scsi_rport_work(struct work_struct *);
158extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); 163extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
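The reworked prototypes above move the destination out of the request structure: CT goes to an explicit WKA port, ELS to an explicit D_ID, and both carry payload and completion callback in a struct zfcp_fsf_ct_els. A hedged sketch of the new calling convention; req_sg, resp_sg, my_handler and my_data are placeholders, while the member names are those used by zfcp_fc.c below:

	struct zfcp_fsf_ct_els ct_els = {
		.req	      = req_sg,		/* scatterlist holding the request IU */
		.resp	      = resp_sg,	/* scatterlist for the response */
		.handler      = my_handler,	/* invoked on completion */
		.handler_data = my_data,
	};

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &ct_els, NULL);  /* CT to a WKA port */
	ret = zfcp_fsf_send_els(adapter, d_id, &ct_els);          /* ELS to a D_ID */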
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 722f22de8753..ac5e3b7a3576 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -9,73 +9,38 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <scsi/fc/fc_els.h>
14#include <scsi/libfc.h>
12#include "zfcp_ext.h" 15#include "zfcp_ext.h"
16#include "zfcp_fc.h"
13 17
14enum rscn_address_format { 18static u32 zfcp_fc_rscn_range_mask[] = {
15 RSCN_PORT_ADDRESS = 0x0, 19 [ELS_ADDR_FMT_PORT] = 0xFFFFFF,
16 RSCN_AREA_ADDRESS = 0x1, 20 [ELS_ADDR_FMT_AREA] = 0xFFFF00,
17 RSCN_DOMAIN_ADDRESS = 0x2, 21 [ELS_ADDR_FMT_DOM] = 0xFF0000,
18 RSCN_FABRIC_ADDRESS = 0x3, 22 [ELS_ADDR_FMT_FAB] = 0x000000,
19}; 23};
20 24
21static u32 rscn_range_mask[] = { 25static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
22 [RSCN_PORT_ADDRESS] = 0xFFFFFF,
23 [RSCN_AREA_ADDRESS] = 0xFFFF00,
24 [RSCN_DOMAIN_ADDRESS] = 0xFF0000,
25 [RSCN_FABRIC_ADDRESS] = 0x000000,
26};
27
28struct gpn_ft_resp_acc {
29 u8 control;
30 u8 port_id[3];
31 u8 reserved[4];
32 u64 wwpn;
33} __attribute__ ((packed));
34
35#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
36#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
37 / sizeof(struct gpn_ft_resp_acc))
38#define ZFCP_GPN_FT_BUFFERS 4
39#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
40 - sizeof(struct ct_hdr))
41#define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
42
43struct ct_iu_gpn_ft_resp {
44 struct ct_hdr header;
45 struct gpn_ft_resp_acc accept[ZFCP_GPN_FT_ENTRIES];
46} __attribute__ ((packed));
47
48struct zfcp_gpn_ft {
49 struct zfcp_send_ct ct;
50 struct scatterlist sg_req;
51 struct scatterlist sg_resp[ZFCP_GPN_FT_BUFFERS];
52};
53
54struct zfcp_fc_ns_handler_data {
55 struct completion done;
56 void (*handler)(unsigned long);
57 unsigned long handler_data;
58};
59
60static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
61{ 26{
62 if (mutex_lock_interruptible(&wka_port->mutex)) 27 if (mutex_lock_interruptible(&wka_port->mutex))
63 return -ERESTARTSYS; 28 return -ERESTARTSYS;
64 29
65 if (wka_port->status == ZFCP_WKA_PORT_OFFLINE || 30 if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
66 wka_port->status == ZFCP_WKA_PORT_CLOSING) { 31 wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
67 wka_port->status = ZFCP_WKA_PORT_OPENING; 32 wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
68 if (zfcp_fsf_open_wka_port(wka_port)) 33 if (zfcp_fsf_open_wka_port(wka_port))
69 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 34 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
70 } 35 }
71 36
72 mutex_unlock(&wka_port->mutex); 37 mutex_unlock(&wka_port->mutex);
73 38
74 wait_event(wka_port->completion_wq, 39 wait_event(wka_port->completion_wq,
75 wka_port->status == ZFCP_WKA_PORT_ONLINE || 40 wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
76 wka_port->status == ZFCP_WKA_PORT_OFFLINE); 41 wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);
77 42
78 if (wka_port->status == ZFCP_WKA_PORT_ONLINE) { 43 if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
79 atomic_inc(&wka_port->refcount); 44 atomic_inc(&wka_port->refcount);
80 return 0; 45 return 0;
81 } 46 }
@@ -85,24 +50,24 @@ static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
85static void zfcp_fc_wka_port_offline(struct work_struct *work) 50static void zfcp_fc_wka_port_offline(struct work_struct *work)
86{ 51{
87 struct delayed_work *dw = to_delayed_work(work); 52 struct delayed_work *dw = to_delayed_work(work);
88 struct zfcp_wka_port *wka_port = 53 struct zfcp_fc_wka_port *wka_port =
89 container_of(dw, struct zfcp_wka_port, work); 54 container_of(dw, struct zfcp_fc_wka_port, work);
90 55
91 mutex_lock(&wka_port->mutex); 56 mutex_lock(&wka_port->mutex);
92 if ((atomic_read(&wka_port->refcount) != 0) || 57 if ((atomic_read(&wka_port->refcount) != 0) ||
93 (wka_port->status != ZFCP_WKA_PORT_ONLINE)) 58 (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
94 goto out; 59 goto out;
95 60
96 wka_port->status = ZFCP_WKA_PORT_CLOSING; 61 wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
97 if (zfcp_fsf_close_wka_port(wka_port)) { 62 if (zfcp_fsf_close_wka_port(wka_port)) {
98 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 63 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
99 wake_up(&wka_port->completion_wq); 64 wake_up(&wka_port->completion_wq);
100 } 65 }
101out: 66out:
102 mutex_unlock(&wka_port->mutex); 67 mutex_unlock(&wka_port->mutex);
103} 68}
104 69
105static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port) 70static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
106{ 71{
107 if (atomic_dec_return(&wka_port->refcount) != 0) 72 if (atomic_dec_return(&wka_port->refcount) != 0)
108 return; 73 return;
@@ -110,7 +75,7 @@ static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
110 schedule_delayed_work(&wka_port->work, HZ / 100); 75 schedule_delayed_work(&wka_port->work, HZ / 100);
111} 76}
112 77
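Usage of this get/put pair, as the nameserver code later in the patch does it: get opens the WKA port (or waits for a pending open), put schedules the delayed close once the last user is gone.

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;		/* open failed or the wait was interrupted */
	/* ... issue CT requests to the directory service ... */
	zfcp_fc_wka_port_put(&adapter->gs->ds);	/* refcount 0: delayed offline */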
113static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id, 78static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
114 struct zfcp_adapter *adapter) 79 struct zfcp_adapter *adapter)
115{ 80{
116 init_waitqueue_head(&wka_port->completion_wq); 81 init_waitqueue_head(&wka_port->completion_wq);
@@ -118,107 +83,107 @@ static void zfcp_fc_wka_port_init(struct zfcp_wka_port *wka_port, u32 d_id,
118 wka_port->adapter = adapter; 83 wka_port->adapter = adapter;
119 wka_port->d_id = d_id; 84 wka_port->d_id = d_id;
120 85
121 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 86 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
122 atomic_set(&wka_port->refcount, 0); 87 atomic_set(&wka_port->refcount, 0);
123 mutex_init(&wka_port->mutex); 88 mutex_init(&wka_port->mutex);
124 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline); 89 INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
125} 90}
126 91
127static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka) 92static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
128{ 93{
129 cancel_delayed_work_sync(&wka->work); 94 cancel_delayed_work_sync(&wka->work);
130 mutex_lock(&wka->mutex); 95 mutex_lock(&wka->mutex);
131 wka->status = ZFCP_WKA_PORT_OFFLINE; 96 wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
132 mutex_unlock(&wka->mutex); 97 mutex_unlock(&wka->mutex);
133} 98}
134 99
135void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs) 100void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
136{ 101{
102 if (!gs)
103 return;
137 zfcp_fc_wka_port_force_offline(&gs->ms); 104 zfcp_fc_wka_port_force_offline(&gs->ms);
138 zfcp_fc_wka_port_force_offline(&gs->ts); 105 zfcp_fc_wka_port_force_offline(&gs->ts);
139 zfcp_fc_wka_port_force_offline(&gs->ds); 106 zfcp_fc_wka_port_force_offline(&gs->ds);
140 zfcp_fc_wka_port_force_offline(&gs->as); 107 zfcp_fc_wka_port_force_offline(&gs->as);
141 zfcp_fc_wka_port_force_offline(&gs->ks);
142} 108}
143 109
144static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, 110static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
145 struct fcp_rscn_element *elem) 111 struct fc_els_rscn_page *page)
146{ 112{
147 unsigned long flags; 113 unsigned long flags;
114 struct zfcp_adapter *adapter = fsf_req->adapter;
148 struct zfcp_port *port; 115 struct zfcp_port *port;
149 116
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 117 read_lock_irqsave(&adapter->port_list_lock, flags);
151 list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { 118 list_for_each_entry(port, &adapter->port_list, list) {
152 if ((port->d_id & range) == (elem->nport_did & range)) 119 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
153 zfcp_fc_test_link(port); 120 zfcp_fc_test_link(port);
154 if (!port->d_id) 121 if (!port->d_id)
155 zfcp_erp_port_reopen(port, 122 zfcp_erp_port_reopen(port,
156 ZFCP_STATUS_COMMON_ERP_FAILED, 123 ZFCP_STATUS_COMMON_ERP_FAILED,
157 "fcrscn1", NULL); 124 "fcrscn1", NULL);
158 } 125 }
159 126 read_unlock_irqrestore(&adapter->port_list_lock, flags);
160 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
161} 127}
162 128
163static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 129static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
164{ 130{
165 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 131 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
166 struct fcp_rscn_head *fcp_rscn_head; 132 struct fc_els_rscn *head;
167 struct fcp_rscn_element *fcp_rscn_element; 133 struct fc_els_rscn_page *page;
168 u16 i; 134 u16 i;
169 u16 no_entries; 135 u16 no_entries;
170 u32 range_mask; 136 unsigned int afmt;
171 137
172 fcp_rscn_head = (struct fcp_rscn_head *) status_buffer->payload.data; 138 head = (struct fc_els_rscn *) status_buffer->payload.data;
173 fcp_rscn_element = (struct fcp_rscn_element *) fcp_rscn_head; 139 page = (struct fc_els_rscn_page *) head;
174 140
175 /* see FC-FS */ 141 /* see FC-FS */
176 no_entries = fcp_rscn_head->payload_len / 142 no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);
177 sizeof(struct fcp_rscn_element);
178 143
179 for (i = 1; i < no_entries; i++) { 144 for (i = 1; i < no_entries; i++) {
180 /* skip head and start with 1st element */ 145 /* skip head and start with 1st element */
181 fcp_rscn_element++; 146 page++;
182 range_mask = rscn_range_mask[fcp_rscn_element->addr_format]; 147 afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
183 _zfcp_fc_incoming_rscn(fsf_req, range_mask, fcp_rscn_element); 148 _zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
149 page);
184 } 150 }
185 schedule_work(&fsf_req->adapter->scan_work); 151 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
186} 152}
187 153
188static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) 154static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
189{ 155{
156 unsigned long flags;
190 struct zfcp_adapter *adapter = req->adapter; 157 struct zfcp_adapter *adapter = req->adapter;
191 struct zfcp_port *port; 158 struct zfcp_port *port;
192 unsigned long flags;
193 159
194 read_lock_irqsave(&zfcp_data.config_lock, flags); 160 read_lock_irqsave(&adapter->port_list_lock, flags);
195 list_for_each_entry(port, &adapter->port_list_head, list) 161 list_for_each_entry(port, &adapter->port_list, list)
196 if (port->wwpn == wwpn) 162 if (port->wwpn == wwpn) {
163 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
197 break; 164 break;
198 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 165 }
199 166 read_unlock_irqrestore(&adapter->port_list_lock, flags);
200 if (port && (port->wwpn == wwpn))
201 zfcp_erp_port_forced_reopen(port, 0, "fciwwp1", req);
202} 167}
203 168
204static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req) 169static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
205{ 170{
206 struct fsf_status_read_buffer *status_buffer = 171 struct fsf_status_read_buffer *status_buffer;
207 (struct fsf_status_read_buffer *)req->data; 172 struct fc_els_flogi *plogi;
208 struct fsf_plogi *els_plogi =
209 (struct fsf_plogi *) status_buffer->payload.data;
210 173
211 zfcp_fc_incoming_wwpn(req, els_plogi->serv_param.wwpn); 174 status_buffer = (struct fsf_status_read_buffer *) req->data;
175 plogi = (struct fc_els_flogi *) status_buffer->payload.data;
176 zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
212} 177}
213 178
214static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req) 179static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
215{ 180{
216 struct fsf_status_read_buffer *status_buffer = 181 struct fsf_status_read_buffer *status_buffer =
217 (struct fsf_status_read_buffer *)req->data; 182 (struct fsf_status_read_buffer *)req->data;
218 struct fcp_logo *els_logo = 183 struct fc_els_logo *logo =
219 (struct fcp_logo *) status_buffer->payload.data; 184 (struct fc_els_logo *) status_buffer->payload.data;
220 185
221 zfcp_fc_incoming_wwpn(req, els_logo->nport_wwpn); 186 zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
222} 187}
223 188
224/** 189/**
@@ -232,79 +197,72 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
232 unsigned int els_type = status_buffer->payload.data[0]; 197 unsigned int els_type = status_buffer->payload.data[0];
233 198
234 zfcp_dbf_san_incoming_els(fsf_req); 199 zfcp_dbf_san_incoming_els(fsf_req);
235 if (els_type == LS_PLOGI) 200 if (els_type == ELS_PLOGI)
236 zfcp_fc_incoming_plogi(fsf_req); 201 zfcp_fc_incoming_plogi(fsf_req);
237 else if (els_type == LS_LOGO) 202 else if (els_type == ELS_LOGO)
238 zfcp_fc_incoming_logo(fsf_req); 203 zfcp_fc_incoming_logo(fsf_req);
239 else if (els_type == LS_RSCN) 204 else if (els_type == ELS_RSCN)
240 zfcp_fc_incoming_rscn(fsf_req); 205 zfcp_fc_incoming_rscn(fsf_req);
241} 206}
242 207
243static void zfcp_fc_ns_handler(unsigned long data) 208static void zfcp_fc_ns_gid_pn_eval(void *data)
244{ 209{
245 struct zfcp_fc_ns_handler_data *compl_rec = 210 struct zfcp_fc_gid_pn *gid_pn = data;
246 (struct zfcp_fc_ns_handler_data *) data; 211 struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
247 212 struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
248 if (compl_rec->handler) 213 struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
249 compl_rec->handler(compl_rec->handler_data);
250
251 complete(&compl_rec->done);
252}
253
254static void zfcp_fc_ns_gid_pn_eval(unsigned long data)
255{
256 struct zfcp_gid_pn_data *gid_pn = (struct zfcp_gid_pn_data *) data;
257 struct zfcp_send_ct *ct = &gid_pn->ct;
258 struct ct_iu_gid_pn_req *ct_iu_req = sg_virt(ct->req);
259 struct ct_iu_gid_pn_resp *ct_iu_resp = sg_virt(ct->resp);
260 struct zfcp_port *port = gid_pn->port; 214 struct zfcp_port *port = gid_pn->port;
261 215
262 if (ct->status) 216 if (ct->status)
263 return; 217 return;
264 if (ct_iu_resp->header.cmd_rsp_code != ZFCP_CT_ACCEPT) 218 if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
265 return; 219 return;
266 220
267 /* paranoia */ 221 /* paranoia */
268 if (ct_iu_req->wwpn != port->wwpn) 222 if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
269 return; 223 return;
270 /* looks like a valid d_id */ 224 /* looks like a valid d_id */
271 port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK; 225 port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
226}
227
228static void zfcp_fc_complete(void *data)
229{
230 complete(data);
272} 231}
273 232
274static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, 233static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
275 struct zfcp_gid_pn_data *gid_pn) 234 struct zfcp_fc_gid_pn *gid_pn)
276{ 235{
277 struct zfcp_adapter *adapter = port->adapter; 236 struct zfcp_adapter *adapter = port->adapter;
278 struct zfcp_fc_ns_handler_data compl_rec; 237 DECLARE_COMPLETION_ONSTACK(completion);
279 int ret; 238 int ret;
280 239
281 /* setup parameters for send generic command */ 240 /* setup parameters for send generic command */
282 gid_pn->port = port; 241 gid_pn->port = port;
283 gid_pn->ct.wka_port = &adapter->gs->ds; 242 gid_pn->ct.handler = zfcp_fc_complete;
284 gid_pn->ct.handler = zfcp_fc_ns_handler; 243 gid_pn->ct.handler_data = &completion;
285 gid_pn->ct.handler_data = (unsigned long) &compl_rec; 244 gid_pn->ct.req = &gid_pn->sg_req;
286 gid_pn->ct.req = &gid_pn->req; 245 gid_pn->ct.resp = &gid_pn->sg_resp;
287 gid_pn->ct.resp = &gid_pn->resp; 246 sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
288 sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req, 247 sizeof(struct zfcp_fc_gid_pn_req));
289 sizeof(struct ct_iu_gid_pn_req)); 248 sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
290 sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp, 249 sizeof(struct zfcp_fc_gid_pn_resp));
291 sizeof(struct ct_iu_gid_pn_resp));
292 250
293 /* setup nameserver request */ 251 /* setup nameserver request */
294 gid_pn->ct_iu_req.header.revision = ZFCP_CT_REVISION; 252 gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
295 gid_pn->ct_iu_req.header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 253 gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
296 gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER; 254 gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
297 gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS; 255 gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
298 gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN; 256 gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
299 gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4; 257 gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
300 gid_pn->ct_iu_req.wwpn = port->wwpn; 258 gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
301 259
302 init_completion(&compl_rec.done); 260 ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
303 compl_rec.handler = zfcp_fc_ns_gid_pn_eval; 261 adapter->pool.gid_pn_req);
304 compl_rec.handler_data = (unsigned long) gid_pn; 262 if (!ret) {
305 ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req); 263 wait_for_completion(&completion);
306 if (!ret) 264 zfcp_fc_ns_gid_pn_eval(gid_pn);
307 wait_for_completion(&compl_rec.done); 265 }
308 return ret; 266 return ret;
309} 267}
310 268
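The old zfcp_fc_ns_handler/compl_rec indirection collapses into a plain completion. Pulled together from the interleaved lines above, the synchronous request pattern is (a sketch operating on the zfcp_fsf_ct_els embedded in the request container):

	DECLARE_COMPLETION_ONSTACK(completion);

	ct->handler = zfcp_fc_complete;		/* simply calls complete() */
	ct->handler_data = &completion;
	if (!zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL))
		wait_for_completion(&completion);
	/* only now are ct->status and the response buffer valid */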
@@ -316,10 +274,10 @@ static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
316static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) 274static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
317{ 275{
318 int ret; 276 int ret;
319 struct zfcp_gid_pn_data *gid_pn; 277 struct zfcp_fc_gid_pn *gid_pn;
320 struct zfcp_adapter *adapter = port->adapter; 278 struct zfcp_adapter *adapter = port->adapter;
321 279
322 gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC); 280 gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
323 if (!gid_pn) 281 if (!gid_pn)
324 return -ENOMEM; 282 return -ENOMEM;
325 283
@@ -333,7 +291,7 @@ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
333 291
334 zfcp_fc_wka_port_put(&adapter->gs->ds); 292 zfcp_fc_wka_port_put(&adapter->gs->ds);
335out: 293out:
336 mempool_free(gid_pn, adapter->pool.gid_pn_data); 294 mempool_free(gid_pn, adapter->pool.gid_pn);
337 return ret; 295 return ret;
338} 296}
339 297
@@ -357,7 +315,18 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
357 315
358 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL); 316 zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
359out: 317out:
360 zfcp_port_put(port); 318 put_device(&port->sysfs_device);
319}
320
321/**
322 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
323 * @port: The zfcp_port to lookup the d_id for.
324 */
325void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
326{
327 get_device(&port->sysfs_device);
328 if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
329 put_device(&port->sysfs_device);
361} 330}
362 331
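queue_work() returns 0 when the work item is already pending, in which case the reference just taken must be dropped again; otherwise zfcp_fc_port_did_lookup() drops it in its out: path. In short:

	get_device(&port->sysfs_device);	/* keep the port alive while queued */
	if (!queue_work(port->adapter->work_queue, &port->gid_pn_work))
		put_device(&port->sysfs_device);	/* was already queued */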
363/** 332/**
@@ -367,33 +336,36 @@ out:
367 * 336 *
 368 * Evaluate PLOGI payload and copy important fields into zfcp_port structure 337 * Evaluate PLOGI payload and copy important fields into zfcp_port structure
369 */ 338 */
370void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fsf_plogi *plogi) 339void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
371{ 340{
372 port->maxframe_size = plogi->serv_param.common_serv_param[7] | 341 if (plogi->fl_wwpn != port->wwpn) {
373 ((plogi->serv_param.common_serv_param[6] & 0x0F) << 8); 342 port->d_id = 0;
374 if (plogi->serv_param.class1_serv_param[0] & 0x80) 343 dev_warn(&port->adapter->ccw_device->dev,
344 "A port opened with WWPN 0x%016Lx returned data that "
345 "identifies it as WWPN 0x%016Lx\n",
346 (unsigned long long) port->wwpn,
347 (unsigned long long) plogi->fl_wwpn);
348 return;
349 }
350
351 port->wwnn = plogi->fl_wwnn;
352 port->maxframe_size = plogi->fl_csp.sp_bb_data;
353
354 if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
375 port->supported_classes |= FC_COS_CLASS1; 355 port->supported_classes |= FC_COS_CLASS1;
376 if (plogi->serv_param.class2_serv_param[0] & 0x80) 356 if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
377 port->supported_classes |= FC_COS_CLASS2; 357 port->supported_classes |= FC_COS_CLASS2;
378 if (plogi->serv_param.class3_serv_param[0] & 0x80) 358 if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
379 port->supported_classes |= FC_COS_CLASS3; 359 port->supported_classes |= FC_COS_CLASS3;
380 if (plogi->serv_param.class4_serv_param[0] & 0x80) 360 if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
381 port->supported_classes |= FC_COS_CLASS4; 361 port->supported_classes |= FC_COS_CLASS4;
382} 362}
383 363
384struct zfcp_els_adisc { 364static void zfcp_fc_adisc_handler(void *data)
385 struct zfcp_send_els els;
386 struct scatterlist req;
387 struct scatterlist resp;
388 struct zfcp_ls_adisc ls_adisc;
389 struct zfcp_ls_adisc ls_adisc_acc;
390};
391
392static void zfcp_fc_adisc_handler(unsigned long data)
393{ 365{
394 struct zfcp_els_adisc *adisc = (struct zfcp_els_adisc *) data; 366 struct zfcp_fc_els_adisc *adisc = data;
395 struct zfcp_port *port = adisc->els.port; 367 struct zfcp_port *port = adisc->els.port;
396 struct zfcp_ls_adisc *ls_adisc = &adisc->ls_adisc_acc; 368 struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
397 369
398 if (adisc->els.status) { 370 if (adisc->els.status) {
399 /* request rejected or timed out */ 371 /* request rejected or timed out */
@@ -403,9 +375,9 @@ static void zfcp_fc_adisc_handler(unsigned long data)
403 } 375 }
404 376
405 if (!port->wwnn) 377 if (!port->wwnn)
406 port->wwnn = ls_adisc->wwnn; 378 port->wwnn = adisc_resp->adisc_wwnn;
407 379
408 if ((port->wwpn != ls_adisc->wwpn) || 380 if ((port->wwpn != adisc_resp->adisc_wwpn) ||
409 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) { 381 !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
410 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, 382 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
411 "fcadh_2", NULL); 383 "fcadh_2", NULL);
@@ -416,40 +388,44 @@ static void zfcp_fc_adisc_handler(unsigned long data)
416 zfcp_scsi_schedule_rport_register(port); 388 zfcp_scsi_schedule_rport_register(port);
417 out: 389 out:
418 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); 390 atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
419 zfcp_port_put(port); 391 put_device(&port->sysfs_device);
420 kfree(adisc); 392 kmem_cache_free(zfcp_data.adisc_cache, adisc);
421} 393}
422 394
423static int zfcp_fc_adisc(struct zfcp_port *port) 395static int zfcp_fc_adisc(struct zfcp_port *port)
424{ 396{
425 struct zfcp_els_adisc *adisc; 397 struct zfcp_fc_els_adisc *adisc;
426 struct zfcp_adapter *adapter = port->adapter; 398 struct zfcp_adapter *adapter = port->adapter;
399 int ret;
427 400
428 adisc = kzalloc(sizeof(struct zfcp_els_adisc), GFP_ATOMIC); 401 adisc = kmem_cache_alloc(zfcp_data.adisc_cache, GFP_ATOMIC);
429 if (!adisc) 402 if (!adisc)
430 return -ENOMEM; 403 return -ENOMEM;
431 404
405 adisc->els.port = port;
432 adisc->els.req = &adisc->req; 406 adisc->els.req = &adisc->req;
433 adisc->els.resp = &adisc->resp; 407 adisc->els.resp = &adisc->resp;
434 sg_init_one(adisc->els.req, &adisc->ls_adisc, 408 sg_init_one(adisc->els.req, &adisc->adisc_req,
435 sizeof(struct zfcp_ls_adisc)); 409 sizeof(struct fc_els_adisc));
436 sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc, 410 sg_init_one(adisc->els.resp, &adisc->adisc_resp,
437 sizeof(struct zfcp_ls_adisc)); 411 sizeof(struct fc_els_adisc));
438 412
439 adisc->els.adapter = adapter;
440 adisc->els.port = port;
441 adisc->els.d_id = port->d_id;
442 adisc->els.handler = zfcp_fc_adisc_handler; 413 adisc->els.handler = zfcp_fc_adisc_handler;
443 adisc->els.handler_data = (unsigned long) adisc; 414 adisc->els.handler_data = adisc;
444 adisc->els.ls_code = adisc->ls_adisc.code = ZFCP_LS_ADISC;
445 415
446 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports 416 /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
447 without FC-AL-2 capability, so we don't set it */ 417 without FC-AL-2 capability, so we don't set it */
448 adisc->ls_adisc.wwpn = fc_host_port_name(adapter->scsi_host); 418 adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
449 adisc->ls_adisc.wwnn = fc_host_node_name(adapter->scsi_host); 419 adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
450 adisc->ls_adisc.nport_id = fc_host_port_id(adapter->scsi_host); 420 adisc->adisc_req.adisc_cmd = ELS_ADISC;
421 hton24(adisc->adisc_req.adisc_port_id,
422 fc_host_port_id(adapter->scsi_host));
423
424 ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els);
425 if (ret)
426 kmem_cache_free(zfcp_data.adisc_cache, adisc);
451 427
452 return zfcp_fsf_send_els(&adisc->els); 428 return ret;
453} 429}
454 430
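The open-coded shifts on 3-byte FC addresses are gone in favour of the libfc helpers. A tiny worked example, assuming the ntoh24()/hton24() inlines from <scsi/libfc.h>, which this file now includes:

	u8 fid[3];

	hton24(fid, 0x123456);	/* fid[] = { 0x12, 0x34, 0x56 } */
	/* ntoh24(fid) yields 0x123456 again */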
455void zfcp_fc_link_test_work(struct work_struct *work) 431void zfcp_fc_link_test_work(struct work_struct *work)
@@ -458,7 +434,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
458 container_of(work, struct zfcp_port, test_link_work); 434 container_of(work, struct zfcp_port, test_link_work);
459 int retval; 435 int retval;
460 436
461 zfcp_port_get(port); 437 get_device(&port->sysfs_device);
462 port->rport_task = RPORT_DEL; 438 port->rport_task = RPORT_DEL;
463 zfcp_scsi_rport_work(&port->rport_work); 439 zfcp_scsi_rport_work(&port->rport_work);
464 440
@@ -477,7 +453,7 @@ void zfcp_fc_link_test_work(struct work_struct *work)
477 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL); 453 zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
478 454
479out: 455out:
480 zfcp_port_put(port); 456 put_device(&port->sysfs_device);
481} 457}
482 458
483/** 459/**
@@ -490,12 +466,12 @@ out:
490 */ 466 */
491void zfcp_fc_test_link(struct zfcp_port *port) 467void zfcp_fc_test_link(struct zfcp_port *port)
492{ 468{
493 zfcp_port_get(port); 469 get_device(&port->sysfs_device);
494 if (!queue_work(port->adapter->work_queue, &port->test_link_work)) 470 if (!queue_work(port->adapter->work_queue, &port->test_link_work))
495 zfcp_port_put(port); 471 put_device(&port->sysfs_device);
496} 472}
497 473
498static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num) 474static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
499{ 475{
500 struct scatterlist *sg = &gpn_ft->sg_req; 476 struct scatterlist *sg = &gpn_ft->sg_req;
501 477
@@ -505,10 +481,10 @@ static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
505 kfree(gpn_ft); 481 kfree(gpn_ft);
506} 482}
507 483
508static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num) 484static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
509{ 485{
510 struct zfcp_gpn_ft *gpn_ft; 486 struct zfcp_fc_gpn_ft *gpn_ft;
511 struct ct_iu_gpn_ft_req *req; 487 struct zfcp_fc_gpn_ft_req *req;
512 488
513 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); 489 gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
514 if (!gpn_ft) 490 if (!gpn_ft)
@@ -531,159 +507,152 @@ out:
531} 507}
532 508
533 509
534static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft, 510static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
535 struct zfcp_adapter *adapter, int max_bytes) 511 struct zfcp_adapter *adapter, int max_bytes)
536{ 512{
537 struct zfcp_send_ct *ct = &gpn_ft->ct; 513 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
538 struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); 514 struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
539 struct zfcp_fc_ns_handler_data compl_rec; 515 DECLARE_COMPLETION_ONSTACK(completion);
540 int ret; 516 int ret;
541 517
542 /* prepare CT IU for GPN_FT */ 518 /* prepare CT IU for GPN_FT */
543 req->header.revision = ZFCP_CT_REVISION; 519 req->ct_hdr.ct_rev = FC_CT_REV;
544 req->header.gs_type = ZFCP_CT_DIRECTORY_SERVICE; 520 req->ct_hdr.ct_fs_type = FC_FST_DIR;
545 req->header.gs_subtype = ZFCP_CT_NAME_SERVER; 521 req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
546 req->header.options = ZFCP_CT_SYNCHRONOUS; 522 req->ct_hdr.ct_options = 0;
547 req->header.cmd_rsp_code = ZFCP_CT_GPN_FT; 523 req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
548 req->header.max_res_size = max_bytes / 4; 524 req->ct_hdr.ct_mr_size = max_bytes / 4;
549 req->flags = 0; 525 req->gpn_ft.fn_domain_id_scope = 0;
550 req->domain_id_scope = 0; 526 req->gpn_ft.fn_area_id_scope = 0;
551 req->area_id_scope = 0; 527 req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
552 req->fc4_type = ZFCP_CT_SCSI_FCP;
553 528
554 /* prepare zfcp_send_ct */ 529 /* prepare zfcp_send_ct */
555 ct->wka_port = &adapter->gs->ds; 530 ct->handler = zfcp_fc_complete;
556 ct->handler = zfcp_fc_ns_handler; 531 ct->handler_data = &completion;
557 ct->handler_data = (unsigned long)&compl_rec;
558 ct->req = &gpn_ft->sg_req; 532 ct->req = &gpn_ft->sg_req;
559 ct->resp = gpn_ft->sg_resp; 533 ct->resp = gpn_ft->sg_resp;
560 534
561 init_completion(&compl_rec.done); 535 ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL);
562 compl_rec.handler = NULL;
563 ret = zfcp_fsf_send_ct(ct, NULL);
564 if (!ret) 536 if (!ret)
565 wait_for_completion(&compl_rec.done); 537 wait_for_completion(&completion);
566 return ret; 538 return ret;
567} 539}
568 540
569static void zfcp_fc_validate_port(struct zfcp_port *port) 541static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
570{ 542{
571 struct zfcp_adapter *adapter = port->adapter;
572
573 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC)) 543 if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
574 return; 544 return;
575 545
576 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status); 546 atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
577 547
578 if ((port->supported_classes != 0) || 548 if ((port->supported_classes != 0) ||
579 !list_empty(&port->unit_list_head)) { 549 !list_empty(&port->unit_list))
580 zfcp_port_put(port);
581 return; 550 return;
582 } 551
583 zfcp_erp_port_shutdown(port, 0, "fcpval1", NULL); 552 list_move_tail(&port->list, lh);
584 zfcp_erp_wait(adapter);
585 zfcp_port_put(port);
586 zfcp_port_dequeue(port);
587} 553}
588 554
589static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries) 555static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
556 struct zfcp_adapter *adapter, int max_entries)
590{ 557{
591 struct zfcp_send_ct *ct = &gpn_ft->ct; 558 struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
592 struct scatterlist *sg = gpn_ft->sg_resp; 559 struct scatterlist *sg = gpn_ft->sg_resp;
593 struct ct_hdr *hdr = sg_virt(sg); 560 struct fc_ct_hdr *hdr = sg_virt(sg);
594 struct gpn_ft_resp_acc *acc = sg_virt(sg); 561 struct fc_gpn_ft_resp *acc = sg_virt(sg);
595 struct zfcp_adapter *adapter = ct->wka_port->adapter;
596 struct zfcp_port *port, *tmp; 562 struct zfcp_port *port, *tmp;
563 unsigned long flags;
564 LIST_HEAD(remove_lh);
597 u32 d_id; 565 u32 d_id;
598 int ret = 0, x, last = 0; 566 int ret = 0, x, last = 0;
599 567
600 if (ct->status) 568 if (ct->status)
601 return -EIO; 569 return -EIO;
602 570
603 if (hdr->cmd_rsp_code != ZFCP_CT_ACCEPT) { 571 if (hdr->ct_cmd != FC_FS_ACC) {
604 if (hdr->reason_code == ZFCP_CT_UNABLE_TO_PERFORM_CMD) 572 if (hdr->ct_reason == FC_BA_RJT_UNABLE)
605 return -EAGAIN; /* might be a temporary condition */ 573 return -EAGAIN; /* might be a temporary condition */
606 return -EIO; 574 return -EIO;
607 } 575 }
608 576
609 if (hdr->max_res_size) { 577 if (hdr->ct_mr_size) {
610 dev_warn(&adapter->ccw_device->dev, 578 dev_warn(&adapter->ccw_device->dev,
611 "The name server reported %d words residual data\n", 579 "The name server reported %d words residual data\n",
612 hdr->max_res_size); 580 hdr->ct_mr_size);
613 return -E2BIG; 581 return -E2BIG;
614 } 582 }
615 583
616 mutex_lock(&zfcp_data.config_mutex);
617
618 /* first entry is the header */ 584 /* first entry is the header */
619 for (x = 1; x < max_entries && !last; x++) { 585 for (x = 1; x < max_entries && !last; x++) {
620 if (x % (ZFCP_GPN_FT_ENTRIES + 1)) 586 if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
621 acc++; 587 acc++;
622 else 588 else
623 acc = sg_virt(++sg); 589 acc = sg_virt(++sg);
624 590
625 last = acc->control & 0x80; 591 last = acc->fp_flags & FC_NS_FID_LAST;
626 d_id = acc->port_id[0] << 16 | acc->port_id[1] << 8 | 592 d_id = ntoh24(acc->fp_fid);
627 acc->port_id[2];
628 593
629 /* don't attach ports with a well known address */ 594 /* don't attach ports with a well known address */
630 if ((d_id & ZFCP_DID_WKA) == ZFCP_DID_WKA) 595 if (d_id >= FC_FID_WELL_KNOWN_BASE)
631 continue; 596 continue;
632 /* skip the adapter's port and known remote ports */ 597 /* skip the adapter's port and known remote ports */
633 if (acc->wwpn == fc_host_port_name(adapter->scsi_host)) 598 if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
634 continue;
635 port = zfcp_get_port_by_wwpn(adapter, acc->wwpn);
636 if (port)
637 continue; 599 continue;
638 600
639 port = zfcp_port_enqueue(adapter, acc->wwpn, 601 port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
640 ZFCP_STATUS_COMMON_NOESC, d_id); 602 ZFCP_STATUS_COMMON_NOESC, d_id);
641 if (IS_ERR(port)) 603 if (!IS_ERR(port))
642 ret = PTR_ERR(port);
643 else
644 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL); 604 zfcp_erp_port_reopen(port, 0, "fcegpf1", NULL);
605 else if (PTR_ERR(port) != -EEXIST)
606 ret = PTR_ERR(port);
645 } 607 }
646 608
647 zfcp_erp_wait(adapter); 609 zfcp_erp_wait(adapter);
648 list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list) 610 write_lock_irqsave(&adapter->port_list_lock, flags);
649 zfcp_fc_validate_port(port); 611 list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
650 mutex_unlock(&zfcp_data.config_mutex); 612 zfcp_fc_validate_port(port, &remove_lh);
613 write_unlock_irqrestore(&adapter->port_list_lock, flags);
614
615 list_for_each_entry_safe(port, tmp, &remove_lh, list) {
616 zfcp_erp_port_shutdown(port, 0, "fcegpf2", NULL);
617 zfcp_device_unregister(&port->sysfs_device,
618 &zfcp_sysfs_port_attrs);
619 }
620
651 return ret; 621 return ret;
652} 622}
653 623
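zfcp_device_unregister() may sleep, so stale ports cannot be torn down while port_list_lock (a spinlock) is held; zfcp_fc_validate_port() therefore only moves them to a caller-provided list. The shape of this two-phase removal, with stale() standing in for the real NOESC/empty-unit-list test:

	LIST_HEAD(remove_lh);

	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		if (stale(port))	/* hypothetical predicate */
			list_move_tail(&port->list, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list)
		zfcp_device_unregister(&port->sysfs_device,
				       &zfcp_sysfs_port_attrs);	/* may sleep */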
654/** 624/**
655 * zfcp_fc_scan_ports - scan remote ports and attach new ports 625 * zfcp_fc_scan_ports - scan remote ports and attach new ports
656 * @adapter: pointer to struct zfcp_adapter 626 * @work: reference to scheduled work
657 */ 627 */
658int zfcp_fc_scan_ports(struct zfcp_adapter *adapter) 628void zfcp_fc_scan_ports(struct work_struct *work)
659{ 629{
630 struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
631 scan_work);
660 int ret, i; 632 int ret, i;
661 struct zfcp_gpn_ft *gpn_ft; 633 struct zfcp_fc_gpn_ft *gpn_ft;
662 int chain, max_entries, buf_num, max_bytes; 634 int chain, max_entries, buf_num, max_bytes;
663 635
664 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; 636 chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
665 buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1; 637 buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
666 max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES; 638 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
667 max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE; 639 max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;
668 640
669 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && 641 if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
670 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) 642 fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
671 return 0; 643 return;
672 644
673 ret = zfcp_fc_wka_port_get(&adapter->gs->ds); 645 if (zfcp_fc_wka_port_get(&adapter->gs->ds))
674 if (ret) 646 return;
675 return ret;
676 647
677 gpn_ft = zfcp_alloc_sg_env(buf_num); 648 gpn_ft = zfcp_alloc_sg_env(buf_num);
678 if (!gpn_ft) { 649 if (!gpn_ft)
679 ret = -ENOMEM;
680 goto out; 650 goto out;
681 }
682 651
683 for (i = 0; i < 3; i++) { 652 for (i = 0; i < 3; i++) {
684 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); 653 ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
685 if (!ret) { 654 if (!ret) {
686 ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries); 655 ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
687 if (ret == -EAGAIN) 656 if (ret == -EAGAIN)
688 ssleep(1); 657 ssleep(1);
689 else 658 else
@@ -693,174 +662,116 @@ int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
693 zfcp_free_sg_env(gpn_ft, buf_num); 662 zfcp_free_sg_env(gpn_ft, buf_num);
694out: 663out:
695 zfcp_fc_wka_port_put(&adapter->gs->ds); 664 zfcp_fc_wka_port_put(&adapter->gs->ds);
696 return ret;
697} 665}
698 666
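zfcp_fc_scan_ports() is now a work handler instead of a directly called int function, which makes the _zfcp_fc_scan_ports_later wrapper removed below redundant. Callers queue adapter->scan_work on adapter->work_queue and the handler recovers its adapter via container_of(); in outline:

	static void example_scan(struct work_struct *work)
	{
		struct zfcp_adapter *adapter =
			container_of(work, struct zfcp_adapter, scan_work);

		/* ... GPN_FT scan as above, -EAGAIN handled by retrying ... */
	}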
699 667static void zfcp_fc_ct_els_job_handler(void *data)
700void _zfcp_fc_scan_ports_later(struct work_struct *work)
701{ 668{
702 zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work)); 669 struct fc_bsg_job *job = data;
703} 670 struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
704 671 int status = zfcp_ct_els->status;
705struct zfcp_els_fc_job { 672 int reply_status;
706 struct zfcp_send_els els;
707 struct fc_bsg_job *job;
708};
709
710static void zfcp_fc_generic_els_handler(unsigned long data)
711{
712 struct zfcp_els_fc_job *els_fc_job = (struct zfcp_els_fc_job *) data;
713 struct fc_bsg_job *job = els_fc_job->job;
714 struct fc_bsg_reply *reply = job->reply;
715
716 if (els_fc_job->els.status) {
717 /* request rejected or timed out */
718 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_REJECT;
719 goto out;
720 }
721
722 reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
723 reply->reply_payload_rcv_len = job->reply_payload.payload_len;
724 673
725out: 674 reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
726 job->state_flags = FC_RQST_STATE_DONE; 675 job->reply->reply_data.ctels_reply.status = reply_status;
676 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
727 job->job_done(job); 677 job->job_done(job);
728 kfree(els_fc_job);
729} 678}
730 679
731int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *job) 680static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
681 struct zfcp_adapter *adapter)
732{ 682{
733 struct zfcp_els_fc_job *els_fc_job; 683 struct zfcp_fsf_ct_els *els = job->dd_data;
734 struct fc_rport *rport = job->rport; 684 struct fc_rport *rport = job->rport;
735 struct Scsi_Host *shost;
736 struct zfcp_adapter *adapter;
737 struct zfcp_port *port; 685 struct zfcp_port *port;
738 u8 *port_did; 686 u32 d_id;
739
740 shost = rport ? rport_to_shost(rport) : job->shost;
741 adapter = (struct zfcp_adapter *)shost->hostdata[0];
742
743 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
744 return -EINVAL;
745
746 els_fc_job = kzalloc(sizeof(struct zfcp_els_fc_job), GFP_KERNEL);
747 if (!els_fc_job)
748 return -ENOMEM;
749 687
750 els_fc_job->els.adapter = adapter;
751 if (rport) { 688 if (rport) {
752 read_lock_irq(&zfcp_data.config_lock);
753 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 689 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
754 if (port) 690 if (!port)
755 els_fc_job->els.d_id = port->d_id;
756 read_unlock_irq(&zfcp_data.config_lock);
757 if (!port) {
758 kfree(els_fc_job);
759 return -EINVAL; 691 return -EINVAL;
760 }
761 } else {
762 port_did = job->request->rqst_data.h_els.port_id;
763 els_fc_job->els.d_id = (port_did[0] << 16) +
764 (port_did[1] << 8) + port_did[2];
765 }
766
767 els_fc_job->els.req = job->request_payload.sg_list;
768 els_fc_job->els.resp = job->reply_payload.sg_list;
769 els_fc_job->els.handler = zfcp_fc_generic_els_handler;
770 els_fc_job->els.handler_data = (unsigned long) els_fc_job;
771 els_fc_job->job = job;
772
773 return zfcp_fsf_send_els(&els_fc_job->els);
774}
775
776struct zfcp_ct_fc_job {
777 struct zfcp_send_ct ct;
778 struct fc_bsg_job *job;
779};
780
781static void zfcp_fc_generic_ct_handler(unsigned long data)
782{
783 struct zfcp_ct_fc_job *ct_fc_job = (struct zfcp_ct_fc_job *) data;
784 struct fc_bsg_job *job = ct_fc_job->job;
785
786 job->reply->reply_data.ctels_reply.status = ct_fc_job->ct.status ?
787 FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
788 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
789 job->state_flags = FC_RQST_STATE_DONE;
790 job->job_done(job);
791 692
792 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); 693 d_id = port->d_id;
694 put_device(&port->sysfs_device);
695 } else
696 d_id = ntoh24(job->request->rqst_data.h_els.port_id);
793 697
794 kfree(ct_fc_job); 698 return zfcp_fsf_send_els(adapter, d_id, els);
795} 699}
796 700
797int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *job) 701static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
702 struct zfcp_adapter *adapter)
798{ 703{
799 int ret; 704 int ret;
800 u8 gs_type; 705 u8 gs_type;
801 struct fc_rport *rport = job->rport; 706 struct zfcp_fsf_ct_els *ct = job->dd_data;
802 struct Scsi_Host *shost; 707 struct zfcp_fc_wka_port *wka_port;
803 struct zfcp_adapter *adapter;
804 struct zfcp_ct_fc_job *ct_fc_job;
805 u32 preamble_word1; 708 u32 preamble_word1;
806 709
807 shost = rport ? rport_to_shost(rport) : job->shost;
808
809 adapter = (struct zfcp_adapter *)shost->hostdata[0];
810 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
811 return -EINVAL;
812
813 ct_fc_job = kzalloc(sizeof(struct zfcp_ct_fc_job), GFP_KERNEL);
814 if (!ct_fc_job)
815 return -ENOMEM;
816
817 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1; 710 preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
818 gs_type = (preamble_word1 & 0xff000000) >> 24; 711 gs_type = (preamble_word1 & 0xff000000) >> 24;
819 712
820 switch (gs_type) { 713 switch (gs_type) {
821 case FC_FST_ALIAS: 714 case FC_FST_ALIAS:
822 ct_fc_job->ct.wka_port = &adapter->gs->as; 715 wka_port = &adapter->gs->as;
823 break; 716 break;
824 case FC_FST_MGMT: 717 case FC_FST_MGMT:
825 ct_fc_job->ct.wka_port = &adapter->gs->ms; 718 wka_port = &adapter->gs->ms;
826 break; 719 break;
827 case FC_FST_TIME: 720 case FC_FST_TIME:
828 ct_fc_job->ct.wka_port = &adapter->gs->ts; 721 wka_port = &adapter->gs->ts;
829 break; 722 break;
830 case FC_FST_DIR: 723 case FC_FST_DIR:
831 ct_fc_job->ct.wka_port = &adapter->gs->ds; 724 wka_port = &adapter->gs->ds;
832 break; 725 break;
833 default: 726 default:
834 kfree(ct_fc_job);
835 return -EINVAL; /* no such service */ 727 return -EINVAL; /* no such service */
836 } 728 }
837 729
838 ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port); 730 ret = zfcp_fc_wka_port_get(wka_port);
839 if (ret) { 731 if (ret)
840 kfree(ct_fc_job);
841 return ret; 732 return ret;
842 }
843 733
844 ct_fc_job->ct.req = job->request_payload.sg_list; 734 ret = zfcp_fsf_send_ct(wka_port, ct, NULL);
845 ct_fc_job->ct.resp = job->reply_payload.sg_list; 735 if (ret)
846 ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler; 736 zfcp_fc_wka_port_put(wka_port);
847 ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job; 737
848 ct_fc_job->ct.completion = NULL; 738 return ret;
849 ct_fc_job->job = job; 739}
850 740
851 ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL); 741int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
852 if (ret) { 742{
853 kfree(ct_fc_job); 743 struct Scsi_Host *shost;
854 zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port); 744 struct zfcp_adapter *adapter;
745 struct zfcp_fsf_ct_els *ct_els = job->dd_data;
746
747 shost = job->rport ? rport_to_shost(job->rport) : job->shost;
748 adapter = (struct zfcp_adapter *)shost->hostdata[0];
749
750 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
751 return -EINVAL;
752
753 ct_els->req = job->request_payload.sg_list;
754 ct_els->resp = job->reply_payload.sg_list;
755 ct_els->handler = zfcp_fc_ct_els_job_handler;
756 ct_els->handler_data = job;
757
758 switch (job->request->msgcode) {
759 case FC_BSG_RPT_ELS:
760 case FC_BSG_HST_ELS_NOLOGIN:
761 return zfcp_fc_exec_els_job(job, adapter);
762 case FC_BSG_RPT_CT:
763 case FC_BSG_HST_CT:
764 return zfcp_fc_exec_ct_job(job, adapter);
765 default:
766 return -EINVAL;
855 } 767 }
856 return ret;
857} 768}
858 769
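Per-job state now lives in job->dd_data, which the FC transport allocates along with the fc_bsg_job (sized, as far as I can tell, by the dd_bsg_size field of the fc_function_template), so the kzalloc/kfree of the old per-request containers disappears:

	struct zfcp_fsf_ct_els *ct_els = job->dd_data;	/* transport-allocated */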
859int zfcp_fc_gs_setup(struct zfcp_adapter *adapter) 770int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
860{ 771{
861 struct zfcp_wka_ports *wka_ports; 772 struct zfcp_fc_wka_ports *wka_ports;
862 773
863 wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL); 774 wka_ports = kzalloc(sizeof(struct zfcp_fc_wka_ports), GFP_KERNEL);
864 if (!wka_ports) 775 if (!wka_ports)
865 return -ENOMEM; 776 return -ENOMEM;
866 777
@@ -869,7 +780,6 @@ int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
869 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter); 780 zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
870 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter); 781 zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
871 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter); 782 zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
872 zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
873 783
874 return 0; 784 return 0;
875} 785}
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
new file mode 100644
index 000000000000..cb2a3669a384
--- /dev/null
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -0,0 +1,260 @@
1/*
2 * zfcp device driver
3 *
4 * Fibre Channel related definitions and inline functions for the zfcp
5 * device driver
6 *
7 * Copyright IBM Corporation 2009
8 */
9
10#ifndef ZFCP_FC_H
11#define ZFCP_FC_H
12
13#include <scsi/fc/fc_els.h>
14#include <scsi/fc/fc_fcp.h>
15#include <scsi/fc/fc_ns.h>
16#include <scsi/scsi_cmnd.h>
17#include <scsi/scsi_tcq.h>
18#include "zfcp_fsf.h"
19
20#define ZFCP_FC_CT_SIZE_PAGE (PAGE_SIZE - sizeof(struct fc_ct_hdr))
21#define ZFCP_FC_GPN_FT_ENT_PAGE (ZFCP_FC_CT_SIZE_PAGE \
22 / sizeof(struct fc_gpn_ft_resp))
23#define ZFCP_FC_GPN_FT_NUM_BUFS 4 /* memory pages */
24
25#define ZFCP_FC_GPN_FT_MAX_SIZE (ZFCP_FC_GPN_FT_NUM_BUFS * PAGE_SIZE \
26 - sizeof(struct fc_ct_hdr))
27#define ZFCP_FC_GPN_FT_MAX_ENT (ZFCP_FC_GPN_FT_NUM_BUFS * \
28 (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
29
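/* [Editor's note] Worked sizing example, assuming 4 KiB pages and the
 * 16-byte struct fc_ct_hdr and struct fc_gpn_ft_resp from the scsi/fc
 * headers (sizes shown for illustration):
 *   ZFCP_FC_CT_SIZE_PAGE    = 4096 - 16     = 4080 bytes per page
 *   ZFCP_FC_GPN_FT_ENT_PAGE = 4080 / 16     = 255 entries per page
 *   ZFCP_FC_GPN_FT_MAX_SIZE = 4 * 4096 - 16 = 16368 bytes in 4 buffers
 *   ZFCP_FC_GPN_FT_MAX_ENT  = 4 * (255 + 1) = 1024 response entries
 */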
30/**
31 * struct zfcp_fc_gid_pn_req - container for ct header plus gid_pn request
32 * @ct_hdr: FC GS common transport header
33 * @gid_pn: GID_PN request
34 */
35struct zfcp_fc_gid_pn_req {
36 struct fc_ct_hdr ct_hdr;
37 struct fc_ns_gid_pn gid_pn;
38} __packed;
39
40/**
41 * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
42 * @ct_hdr: FC GS common transport header
43 * @gid_pn: GID_PN response
44 */
45struct zfcp_fc_gid_pn_resp {
46 struct fc_ct_hdr ct_hdr;
47 struct fc_gid_pn_resp gid_pn;
48} __packed;
49
50/**
51 * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
52 * @ct: data passed to zfcp_fsf for issuing fsf request
53 * @sg_req: scatterlist entry for request data
54 * @sg_resp: scatterlist entry for response data
55 * @gid_pn_req: GID_PN request data
56 * @gid_pn_resp: GID_PN response data
 * @port: port where GID_PN request is sent to
57 */
58struct zfcp_fc_gid_pn {
59 struct zfcp_fsf_ct_els ct;
60 struct scatterlist sg_req;
61 struct scatterlist sg_resp;
62 struct zfcp_fc_gid_pn_req gid_pn_req;
63 struct zfcp_fc_gid_pn_resp gid_pn_resp;
64 struct zfcp_port *port;
65};
66
67/**
68 * struct zfcp_fc_gpn_ft_req - container for ct header plus gpn_ft request
69 * @ct_hdr: FC GS common transport header
70 * @gpn_ft: GPN_FT request
71 */
72struct zfcp_fc_gpn_ft_req {
73 struct fc_ct_hdr ct_hdr;
74 struct fc_ns_gid_ft gpn_ft;
75} __packed;
76
77/**
78 * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
79 * @ct_hdr: FC GS common transport header
80 * @gpn_ft: Array of gpn_ft response data to fill one memory page
81 */
82struct zfcp_fc_gpn_ft_resp {
83 struct fc_ct_hdr ct_hdr;
84 struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
85} __packed;
86
87/**
88 * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
89 * @ct: data passed to zfcp_fsf for issuing fsf request
90 * @sg_req: scatter list entry for gpn_ft request
91 * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
92 */
93struct zfcp_fc_gpn_ft {
94 struct zfcp_fsf_ct_els ct;
95 struct scatterlist sg_req;
96 struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
97};
98
99/**
100 * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
101 * @els: data required for issuing els fsf command
102 * @req: scatterlist entry for ELS ADISC request
103 * @resp: scatterlist entry for ELS ADISC response
104 * @adisc_req: ELS ADISC request data
105 * @adisc_resp: ELS ADISC response data
106 */
107struct zfcp_fc_els_adisc {
108 struct zfcp_fsf_ct_els els;
109 struct scatterlist req;
110 struct scatterlist resp;
111 struct fc_els_adisc adisc_req;
112 struct fc_els_adisc adisc_resp;
113};
114
115/**
116 * enum zfcp_fc_wka_status - FC WKA port status in zfcp
117 * @ZFCP_FC_WKA_PORT_OFFLINE: Port is closed and not in use
118 * @ZFCP_FC_WKA_PORT_CLOSING: The FSF "close port" request is pending
119 * @ZFCP_FC_WKA_PORT_OPENING: The FSF "open port" request is pending
120 * @ZFCP_FC_WKA_PORT_ONLINE: The port is open and the port handle is valid
121 */
122enum zfcp_fc_wka_status {
123 ZFCP_FC_WKA_PORT_OFFLINE,
124 ZFCP_FC_WKA_PORT_CLOSING,
125 ZFCP_FC_WKA_PORT_OPENING,
126 ZFCP_FC_WKA_PORT_ONLINE,
127};
128
129/**
130 * struct zfcp_fc_wka_port - representation of well-known-address (WKA) FC port
131 * @adapter: Pointer to adapter structure this WKA port belongs to
132 * @completion_wq: Wait for completion of open/close command
133 * @status: Current status of WKA port
134 * @refcount: Reference count to keep port open as long as it is in use
135 * @d_id: FC destination id or well-known-address
136 * @handle: FSF handle for the open WKA port
137 * @mutex: Mutex used during opening/closing state changes
138 * @work: For delaying the closing of the WKA port
139 */
140struct zfcp_fc_wka_port {
141 struct zfcp_adapter *adapter;
142 wait_queue_head_t completion_wq;
143 enum zfcp_fc_wka_status status;
144 atomic_t refcount;
145 u32 d_id;
146 u32 handle;
147 struct mutex mutex;
148 struct delayed_work work;
149};
150
151/**
152 * struct zfcp_fc_wka_ports - Data structures for FC generic services
153 * @ms: FC Management service
154 * @ts: FC time service
155 * @ds: FC directory service
156 * @as: FC alias service
157 */
158struct zfcp_fc_wka_ports {
159 struct zfcp_fc_wka_port ms;
160 struct zfcp_fc_wka_port ts;
161 struct zfcp_fc_wka_port ds;
162 struct zfcp_fc_wka_port as;
163};
164
165/**
166 * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
167 * @fcp: fcp_cmnd to setup
168 * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
169 */
170static inline
171void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
172{
173 char tag[2];
174
175 int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
176
177 if (scsi_populate_tag_msg(scsi, tag)) {
178 switch (tag[0]) {
179 case MSG_ORDERED_TAG:
180 fcp->fc_pri_ta |= FCP_PTA_ORDERED;
181 break;
182 case MSG_SIMPLE_TAG:
183 fcp->fc_pri_ta |= FCP_PTA_SIMPLE;
184 break;
185 }
186 } else
187 fcp->fc_pri_ta = FCP_PTA_SIMPLE;
188
189 if (scsi->sc_data_direction == DMA_FROM_DEVICE)
190 fcp->fc_flags |= FCP_CFL_RDDATA;
191 if (scsi->sc_data_direction == DMA_TO_DEVICE)
192 fcp->fc_flags |= FCP_CFL_WRDATA;
193
194 memcpy(fcp->fc_cdb, scsi->cmnd, scsi->cmd_len);
195
196 fcp->fc_dl = scsi_bufflen(scsi);
197}
198
199/**
200 * zfcp_fc_fcp_tm - setup FCP command as task management command
201 * @fcp: fcp_cmnd to setup
202 * @dev: scsi_device where to send the task management command
203 * @tm_flags: task management flags to setup tm command
204 */
205static inline
206void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
207{
208 int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
209 fcp->fc_tm_flags |= tm_flags;
210}
211
212/**
213 * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
214 * @fcp_rsp: FCP RSP IU to evaluate
215 * @scsi: SCSI command where to update status and sense buffer
216 */
217static inline
218void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
219 struct scsi_cmnd *scsi)
220{
221 struct fcp_resp_rsp_info *rsp_info;
222 char *sense;
223 u32 sense_len, resid;
224 u8 rsp_flags;
225
226 set_msg_byte(scsi, COMMAND_COMPLETE);
227 scsi->result |= fcp_rsp->resp.fr_status;
228
229 rsp_flags = fcp_rsp->resp.fr_flags;
230
231 if (unlikely(rsp_flags & FCP_RSP_LEN_VAL)) {
232 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
233 if (rsp_info->rsp_code == FCP_TMF_CMPL)
234 set_host_byte(scsi, DID_OK);
235 else {
236 set_host_byte(scsi, DID_ERROR);
237 return;
238 }
239 }
240
241 if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
242 sense = (char *) &fcp_rsp[1];
243 if (rsp_flags & FCP_RSP_LEN_VAL)
244 sense += fcp_rsp->ext.fr_rsp_len;
245 sense_len = min(fcp_rsp->ext.fr_sns_len,
246 (u32) SCSI_SENSE_BUFFERSIZE);
247 memcpy(scsi->sense_buffer, sense, sense_len);
248 }
249
250 if (unlikely(rsp_flags & FCP_RESID_UNDER)) {
251 resid = fcp_rsp->ext.fr_resid;
252 scsi_set_resid(scsi, resid);
253 if (scsi_bufflen(scsi) - resid < scsi->underflow &&
254 !(rsp_flags & FCP_SNS_LEN_VAL) &&
255 fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
256 set_host_byte(scsi, DID_ERROR);
257 }
258}
259
260#endif
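[Editor's note] The zfcp_fsf.c hunks below switch every D_ID field from a masked 32-bit integer to the wire-format 3-byte big-endian array, converted with ntoh24()/hton24(). These helpers arrive via the libfc headers (zfcp_fsf.h gains #include <scsi/libfc.h> further down); a sketch matching their semantics, as provided by scsi/fc_frame.h:

static inline u32 ntoh24(const u8 *p)
{
	/* assemble a 24-bit big-endian value, e.g. an FC D_ID */
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

static inline void hton24(u8 *p, u32 v)
{
	/* store the low 24 bits of v in network byte order */
	p[0] = (v >> 16) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = v & 0xff;
}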
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index f09c863dc6bd..482dcd97aa5d 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -10,7 +10,9 @@
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/blktrace_api.h> 12#include <linux/blktrace_api.h>
13#include <scsi/fc/fc_els.h>
13#include "zfcp_ext.h" 14#include "zfcp_ext.h"
15#include "zfcp_fc.h"
14#include "zfcp_dbf.h" 16#include "zfcp_dbf.h"
15 17
16static void zfcp_fsf_request_timeout_handler(unsigned long data) 18static void zfcp_fsf_request_timeout_handler(unsigned long data)
@@ -122,36 +124,32 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
122 124
123static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 125static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
124{ 126{
127 unsigned long flags;
125 struct fsf_status_read_buffer *sr_buf = req->data; 128 struct fsf_status_read_buffer *sr_buf = req->data;
126 struct zfcp_adapter *adapter = req->adapter; 129 struct zfcp_adapter *adapter = req->adapter;
127 struct zfcp_port *port; 130 struct zfcp_port *port;
128 int d_id = sr_buf->d_id & ZFCP_DID_MASK; 131 int d_id = ntoh24(sr_buf->d_id);
129 unsigned long flags;
130 132
131 read_lock_irqsave(&zfcp_data.config_lock, flags); 133 read_lock_irqsave(&adapter->port_list_lock, flags);
132 list_for_each_entry(port, &adapter->port_list_head, list) 134 list_for_each_entry(port, &adapter->port_list, list)
133 if (port->d_id == d_id) { 135 if (port->d_id == d_id) {
134 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
135 zfcp_erp_port_reopen(port, 0, "fssrpc1", req); 136 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
136 return; 137 break;
137 } 138 }
138 read_unlock_irqrestore(&zfcp_data.config_lock, flags); 139 read_unlock_irqrestore(&adapter->port_list_lock, flags);
139} 140}
140 141
141static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id, 142static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
142 struct fsf_link_down_info *link_down) 143 struct fsf_link_down_info *link_down)
143{ 144{
144 struct zfcp_adapter *adapter = req->adapter; 145 struct zfcp_adapter *adapter = req->adapter;
145 unsigned long flags;
146 146
147 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 147 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
148 return; 148 return;
149 149
150 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 150 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
151 151
152 read_lock_irqsave(&zfcp_data.config_lock, flags);
153 zfcp_scsi_schedule_rports_block(adapter); 152 zfcp_scsi_schedule_rports_block(adapter);
154 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
155 153
156 if (!link_down) 154 if (!link_down)
157 goto out; 155 goto out;
@@ -291,7 +289,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
291 zfcp_erp_adapter_access_changed(adapter, "fssrh_3", 289 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
292 req); 290 req);
293 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 291 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
294 schedule_work(&adapter->scan_work); 292 queue_work(adapter->work_queue, &adapter->scan_work);
295 break; 293 break;
296 case FSF_STATUS_READ_CFDC_UPDATED: 294 case FSF_STATUS_READ_CFDC_UPDATED:
297 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req); 295 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
@@ -317,7 +315,6 @@ static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
317 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 315 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
318 return; 316 return;
319 case FSF_SQ_COMMAND_ABORTED: 317 case FSF_SQ_COMMAND_ABORTED:
320 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
321 break; 318 break;
322 case FSF_SQ_NO_RECOM: 319 case FSF_SQ_NO_RECOM:
323 dev_err(&req->adapter->ccw_device->dev, 320 dev_err(&req->adapter->ccw_device->dev,
@@ -358,8 +355,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
358 zfcp_dbf_hba_fsf_response(req); 355 zfcp_dbf_hba_fsf_response(req);
359 356
360 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 357 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
361 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 358 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
362 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
363 return; 359 return;
364 } 360 }
365 361
@@ -377,7 +373,7 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
377 case FSF_PROT_ERROR_STATE: 373 case FSF_PROT_ERROR_STATE:
378 case FSF_PROT_SEQ_NUMB_ERROR: 374 case FSF_PROT_SEQ_NUMB_ERROR:
379 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req); 375 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
380 req->status |= ZFCP_STATUS_FSFREQ_RETRY; 376 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
381 break; 377 break;
382 case FSF_PROT_UNSUPP_QTCB_TYPE: 378 case FSF_PROT_UNSUPP_QTCB_TYPE:
383 dev_err(&adapter->ccw_device->dev, 379 dev_err(&adapter->ccw_device->dev,
@@ -480,20 +476,27 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
480 476
481static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 477static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482{ 478{
483 struct fsf_qtcb_bottom_config *bottom; 479 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
484 struct zfcp_adapter *adapter = req->adapter; 480 struct zfcp_adapter *adapter = req->adapter;
485 struct Scsi_Host *shost = adapter->scsi_host; 481 struct Scsi_Host *shost = adapter->scsi_host;
482 struct fc_els_flogi *nsp, *plogi;
486 483
487 bottom = &req->qtcb->bottom.config; 484 /* adjust pointers for missing command code */
485 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
486 - sizeof(u32));
487 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
488 - sizeof(u32));
488 489
489 if (req->data) 490 if (req->data)
490 memcpy(req->data, bottom, sizeof(*bottom)); 491 memcpy(req->data, bottom, sizeof(*bottom));
491 492
492 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn; 493 fc_host_port_name(shost) = nsp->fl_wwpn;
493 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn; 494 fc_host_node_name(shost) = nsp->fl_wwnn;
494 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK; 495 fc_host_port_id(shost) = ntoh24(bottom->s_id);
495 fc_host_speed(shost) = bottom->fc_link_speed; 496 fc_host_speed(shost) = bottom->fc_link_speed;
496 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 497 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
498 fc_host_supported_fc4s(shost)[2] = 1; /* FCP */
499 fc_host_active_fc4s(shost)[2] = 1; /* FCP */
497 500
498 adapter->hydra_version = bottom->adapter_type; 501 adapter->hydra_version = bottom->adapter_type;
499 adapter->timer_ticks = bottom->timer_interval; 502 adapter->timer_ticks = bottom->timer_interval;
@@ -503,9 +506,9 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
503 506
504 switch (bottom->fc_topology) { 507 switch (bottom->fc_topology) {
505 case FSF_TOPO_P2P: 508 case FSF_TOPO_P2P:
506 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK; 509 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
507 adapter->peer_wwpn = bottom->plogi_payload.wwpn; 510 adapter->peer_wwpn = plogi->fl_wwpn;
508 adapter->peer_wwnn = bottom->plogi_payload.wwnn; 511 adapter->peer_wwnn = plogi->fl_wwnn;
509 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 512 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
510 break; 513 break;
511 case FSF_TOPO_FABRIC: 514 case FSF_TOPO_FABRIC:
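[Editor's note] On the "missing command code" adjustment above: struct fc_els_flogi begins with a 4-byte command-code word that the FSF config payload omits, so backing both pointers up by sizeof(u32) makes the named fields land on the right offsets:

/* layout assumed by the casts (from <scsi/fc/fc_els.h>):
 *   struct fc_els_flogi = { u8 fl_cmd; u8 _fl_resvd[3];  <- absent in payload
 *                           struct fc_els_csp fl_csp;
 *                           u64 fl_wwpn; u64 fl_wwnn; ... };
 * nsp/plogi = payload_start - sizeof(u32) realigns fl_wwpn and fl_wwnn. */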
@@ -881,13 +884,11 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
881 break; 884 break;
882 case FSF_PORT_BOXED: 885 case FSF_PORT_BOXED:
883 zfcp_erp_port_boxed(unit->port, "fsafch3", req); 886 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
884 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 887 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
885 ZFCP_STATUS_FSFREQ_RETRY;
886 break; 888 break;
887 case FSF_LUN_BOXED: 889 case FSF_LUN_BOXED:
888 zfcp_erp_unit_boxed(unit, "fsafch4", req); 890 zfcp_erp_unit_boxed(unit, "fsafch4", req);
889 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 891 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
890 ZFCP_STATUS_FSFREQ_RETRY;
891 break; 892 break;
892 case FSF_ADAPTER_STATUS_AVAILABLE: 893 case FSF_ADAPTER_STATUS_AVAILABLE:
893 switch (fsq->word[0]) { 894 switch (fsq->word[0]) {
@@ -958,10 +959,10 @@ out:
958static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 959static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
959{ 960{
960 struct zfcp_adapter *adapter = req->adapter; 961 struct zfcp_adapter *adapter = req->adapter;
961 struct zfcp_send_ct *send_ct = req->data; 962 struct zfcp_fsf_ct_els *ct = req->data;
962 struct fsf_qtcb_header *header = &req->qtcb->header; 963 struct fsf_qtcb_header *header = &req->qtcb->header;
963 964
964 send_ct->status = -EINVAL; 965 ct->status = -EINVAL;
965 966
966 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 967 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
967 goto skip_fsfstatus; 968 goto skip_fsfstatus;
@@ -969,7 +970,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
969 switch (header->fsf_status) { 970 switch (header->fsf_status) {
970 case FSF_GOOD: 971 case FSF_GOOD:
971 zfcp_dbf_san_ct_response(req); 972 zfcp_dbf_san_ct_response(req);
972 send_ct->status = 0; 973 ct->status = 0;
973 break; 974 break;
974 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 975 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
975 zfcp_fsf_class_not_supp(req); 976 zfcp_fsf_class_not_supp(req);
@@ -985,8 +986,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
985 case FSF_ACCESS_DENIED: 986 case FSF_ACCESS_DENIED:
986 break; 987 break;
987 case FSF_PORT_BOXED: 988 case FSF_PORT_BOXED:
988 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 989 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
989 ZFCP_STATUS_FSFREQ_RETRY;
990 break; 990 break;
991 case FSF_PORT_HANDLE_NOT_VALID: 991 case FSF_PORT_HANDLE_NOT_VALID:
992 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); 992 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
@@ -1001,8 +1001,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1001 } 1001 }
1002 1002
1003skip_fsfstatus: 1003skip_fsfstatus:
1004 if (send_ct->handler) 1004 if (ct->handler)
1005 send_ct->handler(send_ct->handler_data); 1005 ct->handler(ct->handler_data);
1006} 1006}
1007 1007
1008static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale, 1008static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
@@ -1058,14 +1058,30 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req, 1058 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1059 SBAL_FLAGS0_TYPE_WRITE_READ, 1059 SBAL_FLAGS0_TYPE_WRITE_READ,
1060 sg_resp, max_sbals); 1060 sg_resp, max_sbals);
1061 req->qtcb->bottom.support.resp_buf_length = bytes;
1061 if (bytes <= 0) 1062 if (bytes <= 0)
1062 return -EIO; 1063 return -EIO;
1063 1064
1065 return 0;
1066}
1067
1068static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1069 struct scatterlist *sg_req,
1070 struct scatterlist *sg_resp,
1071 int max_sbals)
1072{
1073 int ret;
1074 unsigned int fcp_chan_timeout;
1075
1076 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1077 if (ret)
1078 return ret;
1079
1064 /* common settings for ct/gs and els requests */ 1080 /* common settings for ct/gs and els requests */
1065 req->qtcb->bottom.support.resp_buf_length = bytes; 1081 fcp_chan_timeout = 2 * FC_DEF_R_A_TOV / 1000;
1066 req->qtcb->bottom.support.service_class = FSF_CLASS_3; 1082 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1067 req->qtcb->bottom.support.timeout = 2 * R_A_TOV; 1083 req->qtcb->bottom.support.timeout = fcp_chan_timeout;
1068 zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); 1084 zfcp_fsf_start_timer(req, (fcp_chan_timeout + 10) * HZ);
1069 1085
1070 return 0; 1086 return 0;
1071} 1087}
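[Editor's note] Units in the timeout change above: FC_DEF_R_A_TOV is given in milliseconds (10000 in <scsi/fc/fc_fs.h>), the FSF timeout field takes seconds, and zfcp_fsf_start_timer() takes jiffies:

/* fcp_chan_timeout = 2 * 10000 / 1000 = 20 s  (FSF channel timeout)
 * (fcp_chan_timeout + 10) * HZ        = 30 s  (driver watchdog, in jiffies) */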
@@ -1075,9 +1091,9 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1075 * @ct: pointer to struct zfcp_send_ct with data for request 1091 * @ct: pointer to struct zfcp_send_ct with data for request
1076 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req 1092 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1077 */ 1093 */
1078int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool) 1094int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1095 struct zfcp_fsf_ct_els *ct, mempool_t *pool)
1079{ 1096{
1080 struct zfcp_wka_port *wka_port = ct->wka_port;
1081 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1097 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1082 struct zfcp_fsf_req *req; 1098 struct zfcp_fsf_req *req;
1083 int ret = -EIO; 1099 int ret = -EIO;
@@ -1094,8 +1110,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1094 } 1110 }
1095 1111
1096 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1112 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1097 ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp, 1113 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1098 FSF_MAX_SBALS_PER_REQ); 1114 FSF_MAX_SBALS_PER_REQ);
1099 if (ret) 1115 if (ret)
1100 goto failed_send; 1116 goto failed_send;
1101 1117
@@ -1103,7 +1119,7 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1103 req->qtcb->header.port_handle = wka_port->handle; 1119 req->qtcb->header.port_handle = wka_port->handle;
1104 req->data = ct; 1120 req->data = ct;
1105 1121
1106 zfcp_dbf_san_ct_request(req); 1122 zfcp_dbf_san_ct_request(req, wka_port->d_id);
1107 1123
1108 ret = zfcp_fsf_req_send(req); 1124 ret = zfcp_fsf_req_send(req);
1109 if (ret) 1125 if (ret)
@@ -1120,7 +1136,7 @@ out:
1120 1136
1121static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) 1137static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1122{ 1138{
1123 struct zfcp_send_els *send_els = req->data; 1139 struct zfcp_fsf_ct_els *send_els = req->data;
1124 struct zfcp_port *port = send_els->port; 1140 struct zfcp_port *port = send_els->port;
1125 struct fsf_qtcb_header *header = &req->qtcb->header; 1141 struct fsf_qtcb_header *header = &req->qtcb->header;
1126 1142
@@ -1140,9 +1156,6 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1140 case FSF_ADAPTER_STATUS_AVAILABLE: 1156 case FSF_ADAPTER_STATUS_AVAILABLE:
1141 switch (header->fsf_status_qual.word[0]){ 1157 switch (header->fsf_status_qual.word[0]){
1142 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 1158 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1143 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1144 zfcp_fc_test_link(port);
1145 /*fall through */
1146 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 1159 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1147 case FSF_SQ_RETRY_IF_POSSIBLE: 1160 case FSF_SQ_RETRY_IF_POSSIBLE:
1148 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1161 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1174,10 +1187,11 @@ skip_fsfstatus:
1174 * zfcp_fsf_send_els - initiate an ELS command (FC-FS) 1187 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1175 * @els: pointer to struct zfcp_send_els with data for the command 1188 * @els: pointer to struct zfcp_send_els with data for the command
1176 */ 1189 */
1177int zfcp_fsf_send_els(struct zfcp_send_els *els) 1190int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1191 struct zfcp_fsf_ct_els *els)
1178{ 1192{
1179 struct zfcp_fsf_req *req; 1193 struct zfcp_fsf_req *req;
1180 struct zfcp_qdio *qdio = els->adapter->qdio; 1194 struct zfcp_qdio *qdio = adapter->qdio;
1181 int ret = -EIO; 1195 int ret = -EIO;
1182 1196
1183 spin_lock_bh(&qdio->req_q_lock); 1197 spin_lock_bh(&qdio->req_q_lock);
@@ -1192,12 +1206,12 @@ int zfcp_fsf_send_els(struct zfcp_send_els *els)
1192 } 1206 }
1193 1207
1194 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1195 ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2); 1209 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
1196 1210
1197 if (ret) 1211 if (ret)
1198 goto failed_send; 1212 goto failed_send;
1199 1213
1200 req->qtcb->bottom.support.d_id = els->d_id; 1214 hton24(req->qtcb->bottom.support.d_id, d_id);
1201 req->handler = zfcp_fsf_send_els_handler; 1215 req->handler = zfcp_fsf_send_els_handler;
1202 req->data = els; 1216 req->data = els;
1203 1217
@@ -1408,7 +1422,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1408{ 1422{
1409 struct zfcp_port *port = req->data; 1423 struct zfcp_port *port = req->data;
1410 struct fsf_qtcb_header *header = &req->qtcb->header; 1424 struct fsf_qtcb_header *header = &req->qtcb->header;
1411 struct fsf_plogi *plogi; 1425 struct fc_els_flogi *plogi;
1412 1426
1413 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1427 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1414 goto out; 1428 goto out;
@@ -1458,16 +1472,10 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1458 * another GID_PN straight after a port has been opened. 1472 * another GID_PN straight after a port has been opened.
1459 * Alternatively, an ADISC/PDISC ELS should suffice as well. 1473
1460 */ 1474 */
1461 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; 1475 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1462 if (req->qtcb->bottom.support.els1_length >= 1476 if (req->qtcb->bottom.support.els1_length >=
1463 FSF_PLOGI_MIN_LEN) { 1477 FSF_PLOGI_MIN_LEN)
1464 if (plogi->serv_param.wwpn != port->wwpn)
1465 port->d_id = 0;
1466 else {
1467 port->wwnn = plogi->serv_param.wwnn;
1468 zfcp_fc_plogi_evaluate(port, plogi); 1478 zfcp_fc_plogi_evaluate(port, plogi);
1469 }
1470 }
1471 break; 1479 break;
1472 case FSF_UNKNOWN_OP_SUBTYPE: 1480 case FSF_UNKNOWN_OP_SUBTYPE:
1473 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1481 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1475,7 +1483,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1475 } 1483 }
1476 1484
1477out: 1485out:
1478 zfcp_port_put(port); 1486 put_device(&port->sysfs_device);
1479} 1487}
1480 1488
1481/** 1489/**
@@ -1509,18 +1517,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1509 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1517 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1510 1518
1511 req->handler = zfcp_fsf_open_port_handler; 1519 req->handler = zfcp_fsf_open_port_handler;
1512 req->qtcb->bottom.support.d_id = port->d_id; 1520 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1513 req->data = port; 1521 req->data = port;
1514 req->erp_action = erp_action; 1522 req->erp_action = erp_action;
1515 erp_action->fsf_req = req; 1523 erp_action->fsf_req = req;
1516 zfcp_port_get(port); 1524 get_device(&port->sysfs_device);
1517 1525
1518 zfcp_fsf_start_erp_timer(req); 1526 zfcp_fsf_start_erp_timer(req);
1519 retval = zfcp_fsf_req_send(req); 1527 retval = zfcp_fsf_req_send(req);
1520 if (retval) { 1528 if (retval) {
1521 zfcp_fsf_req_free(req); 1529 zfcp_fsf_req_free(req);
1522 erp_action->fsf_req = NULL; 1530 erp_action->fsf_req = NULL;
1523 zfcp_port_put(port); 1531 put_device(&port->sysfs_device);
1524 } 1532 }
1525out: 1533out:
1526 spin_unlock_bh(&qdio->req_q_lock); 1534 spin_unlock_bh(&qdio->req_q_lock);
@@ -1597,11 +1605,11 @@ out:
1597 1605
1598static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) 1606static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1599{ 1607{
1600 struct zfcp_wka_port *wka_port = req->data; 1608 struct zfcp_fc_wka_port *wka_port = req->data;
1601 struct fsf_qtcb_header *header = &req->qtcb->header; 1609 struct fsf_qtcb_header *header = &req->qtcb->header;
1602 1610
1603 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { 1611 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1604 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1612 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1605 goto out; 1613 goto out;
1606 } 1614 }
1607 1615
@@ -1614,13 +1622,13 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1614 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1622 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1615 /* fall through */ 1623 /* fall through */
1616 case FSF_ACCESS_DENIED: 1624 case FSF_ACCESS_DENIED:
1617 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1625 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1618 break; 1626 break;
1619 case FSF_GOOD: 1627 case FSF_GOOD:
1620 wka_port->handle = header->port_handle; 1628 wka_port->handle = header->port_handle;
1621 /* fall through */ 1629 /* fall through */
1622 case FSF_PORT_ALREADY_OPEN: 1630 case FSF_PORT_ALREADY_OPEN:
1623 wka_port->status = ZFCP_WKA_PORT_ONLINE; 1631 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1624 } 1632 }
1625out: 1633out:
1626 wake_up(&wka_port->completion_wq); 1634 wake_up(&wka_port->completion_wq);
@@ -1628,10 +1636,10 @@ out:
1628 1636
1629/** 1637/**
1630 * zfcp_fsf_open_wka_port - create and send open wka-port request 1638 * zfcp_fsf_open_wka_port - create and send open wka-port request
1631 * @wka_port: pointer to struct zfcp_wka_port 1639 * @wka_port: pointer to struct zfcp_fc_wka_port
1632 * Returns: 0 on success, error otherwise 1640 * Returns: 0 on success, error otherwise
1633 */ 1641 */
1634int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port) 1642int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1635{ 1643{
1636 struct qdio_buffer_element *sbale; 1644 struct qdio_buffer_element *sbale;
1637 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1645 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1656,7 +1664,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1656 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 1664 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1657 1665
1658 req->handler = zfcp_fsf_open_wka_port_handler; 1666 req->handler = zfcp_fsf_open_wka_port_handler;
1659 req->qtcb->bottom.support.d_id = wka_port->d_id; 1667 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1660 req->data = wka_port; 1668 req->data = wka_port;
1661 1669
1662 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 1670 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
@@ -1670,23 +1678,23 @@ out:
1670 1678
1671static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) 1679static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1672{ 1680{
1673 struct zfcp_wka_port *wka_port = req->data; 1681 struct zfcp_fc_wka_port *wka_port = req->data;
1674 1682
1675 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { 1683 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1676 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1684 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1677 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req); 1685 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
1678 } 1686 }
1679 1687
1680 wka_port->status = ZFCP_WKA_PORT_OFFLINE; 1688 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1681 wake_up(&wka_port->completion_wq); 1689 wake_up(&wka_port->completion_wq);
1682} 1690}
1683 1691
1684/** 1692/**
1685 * zfcp_fsf_close_wka_port - create and send close wka port request 1693 * zfcp_fsf_close_wka_port - create and send close wka port request
1686 * @erp_action: pointer to struct zfcp_erp_action 1694 * @wka_port: WKA port to close
1687 * Returns: 0 on success, error otherwise 1695 * Returns: 0 on success, error otherwise
1688 */ 1696 */
1689int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port) 1697int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1690{ 1698{
1691 struct qdio_buffer_element *sbale; 1699 struct qdio_buffer_element *sbale;
1692 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1700 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
@@ -1744,13 +1752,13 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1744 /* can't use generic zfcp_erp_modify_port_status because 1752 /* can't use generic zfcp_erp_modify_port_status because
1745 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */ 1753 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1746 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1754 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1747 list_for_each_entry(unit, &port->unit_list_head, list) 1755 read_lock(&port->unit_list_lock);
1756 list_for_each_entry(unit, &port->unit_list, list)
1748 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1757 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1749 &unit->status); 1758 &unit->status);
1759 read_unlock(&port->unit_list_lock);
1750 zfcp_erp_port_boxed(port, "fscpph2", req); 1760 zfcp_erp_port_boxed(port, "fscpph2", req);
1751 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1761 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1752 ZFCP_STATUS_FSFREQ_RETRY;
1753
1754 break; 1762 break;
1755 case FSF_ADAPTER_STATUS_AVAILABLE: 1763 case FSF_ADAPTER_STATUS_AVAILABLE:
1756 switch (header->fsf_status_qual.word[0]) { 1764 switch (header->fsf_status_qual.word[0]) {
@@ -1766,9 +1774,11 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1766 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port 1774 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1767 */ 1775 */
1768 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status); 1776 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1769 list_for_each_entry(unit, &port->unit_list_head, list) 1777 read_lock(&port->unit_list_lock);
1778 list_for_each_entry(unit, &port->unit_list, list)
1770 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, 1779 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1771 &unit->status); 1780 &unit->status);
1781 read_unlock(&port->unit_list_lock);
1772 break; 1782 break;
1773 } 1783 }
1774} 1784}
@@ -1852,8 +1862,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1852 break; 1862 break;
1853 case FSF_PORT_BOXED: 1863 case FSF_PORT_BOXED:
1854 zfcp_erp_port_boxed(unit->port, "fsouh_2", req); 1864 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1855 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 1865 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1856 ZFCP_STATUS_FSFREQ_RETRY;
1857 break; 1866 break;
1858 case FSF_LUN_SHARING_VIOLATION: 1867 case FSF_LUN_SHARING_VIOLATION:
1859 if (header->fsf_status_qual.word[0]) 1868 if (header->fsf_status_qual.word[0])
@@ -2015,8 +2024,7 @@ static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2015 break; 2024 break;
2016 case FSF_PORT_BOXED: 2025 case FSF_PORT_BOXED:
2017 zfcp_erp_port_boxed(unit->port, "fscuh_3", req); 2026 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2018 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2027 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2019 ZFCP_STATUS_FSFREQ_RETRY;
2020 break; 2028 break;
2021 case FSF_ADAPTER_STATUS_AVAILABLE: 2029 case FSF_ADAPTER_STATUS_AVAILABLE:
2022 switch (req->qtcb->header.fsf_status_qual.word[0]) { 2030 switch (req->qtcb->header.fsf_status_qual.word[0]) {
@@ -2088,72 +2096,57 @@ static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2088 lat_rec->max = max(lat_rec->max, lat); 2096 lat_rec->max = max(lat_rec->max, lat);
2089} 2097}
2090 2098
2091static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req) 2099static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2092{ 2100{
2093 struct fsf_qual_latency_info *lat_inf; 2101 struct fsf_qual_latency_info *lat_in;
2094 struct latency_cont *lat; 2102 struct latency_cont *lat = NULL;
2095 struct zfcp_unit *unit = req->unit; 2103 struct zfcp_unit *unit = req->unit;
2104 struct zfcp_blk_drv_data blktrc;
2105 int ticks = req->adapter->timer_ticks;
2096 2106
2097 lat_inf = &req->qtcb->prefix.prot_status_qual.latency_info; 2107 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2098
2099 switch (req->qtcb->bottom.io.data_direction) {
2100 case FSF_DATADIR_READ:
2101 lat = &unit->latencies.read;
2102 break;
2103 case FSF_DATADIR_WRITE:
2104 lat = &unit->latencies.write;
2105 break;
2106 case FSF_DATADIR_CMND:
2107 lat = &unit->latencies.cmd;
2108 break;
2109 default:
2110 return;
2111 }
2112
2113 spin_lock(&unit->latencies.lock);
2114 zfcp_fsf_update_lat(&lat->channel, lat_inf->channel_lat);
2115 zfcp_fsf_update_lat(&lat->fabric, lat_inf->fabric_lat);
2116 lat->counter++;
2117 spin_unlock(&unit->latencies.lock);
2118}
2119 2108
2120#ifdef CONFIG_BLK_DEV_IO_TRACE 2109 blktrc.flags = 0;
2121static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req) 2110 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2122{ 2111 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2123 struct fsf_qual_latency_info *lat_inf; 2112 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2124 struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data; 2113 blktrc.inb_usage = req->queue_req.qdio_inb_usage;
2125 struct request *req = scsi_cmnd->request; 2114 blktrc.outb_usage = req->queue_req.qdio_outb_usage;
2126 struct zfcp_blk_drv_data trace; 2115
2127 int ticks = fsf_req->adapter->timer_ticks; 2116 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2117 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2118 blktrc.channel_lat = lat_in->channel_lat * ticks;
2119 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2120
2121 switch (req->qtcb->bottom.io.data_direction) {
2122 case FSF_DATADIR_READ:
2123 lat = &unit->latencies.read;
2124 break;
2125 case FSF_DATADIR_WRITE:
2126 lat = &unit->latencies.write;
2127 break;
2128 case FSF_DATADIR_CMND:
2129 lat = &unit->latencies.cmd;
2130 break;
2131 }
2128 2132
2129 trace.flags = 0; 2133 if (lat) {
2130 trace.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2134 spin_lock(&unit->latencies.lock);
2131 if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) { 2135 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2132 trace.flags |= ZFCP_BLK_LAT_VALID; 2136 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2133 lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info; 2137 lat->counter++;
2134 trace.channel_lat = lat_inf->channel_lat * ticks; 2138 spin_unlock(&unit->latencies.lock);
2135 trace.fabric_lat = lat_inf->fabric_lat * ticks; 2139 }
2136 } 2140 }
2137 if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
2138 trace.flags |= ZFCP_BLK_REQ_ERROR;
2139 trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
2140 trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
2141 2141
2142 blk_add_driver_data(req->q, req, &trace, sizeof(trace)); 2142 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2143} 2143 sizeof(blktrc));
2144#else
2145static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
2146{
2147} 2144}
2148#endif
2149 2145
2150static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req) 2146static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2151{ 2147{
2152 struct scsi_cmnd *scpnt; 2148 struct scsi_cmnd *scpnt;
2153 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2149 struct fcp_resp_with_ext *fcp_rsp;
2154 &(req->qtcb->bottom.io.fcp_rsp);
2155 u32 sns_len;
2156 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2157 unsigned long flags; 2150 unsigned long flags;
2158 2151
2159 read_lock_irqsave(&req->adapter->abort_lock, flags); 2152 read_lock_irqsave(&req->adapter->abort_lock, flags);
@@ -2164,50 +2157,16 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2164 return; 2157 return;
2165 } 2158 }
2166 2159
2167 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2168 set_host_byte(scpnt, DID_SOFT_ERROR);
2169 goto skip_fsfstatus;
2170 }
2171
2172 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2160 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2173 set_host_byte(scpnt, DID_ERROR); 2161 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2174 goto skip_fsfstatus; 2162 goto skip_fsfstatus;
2175 } 2163 }
2176 2164
2177 set_msg_byte(scpnt, COMMAND_COMPLETE); 2165 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2178 2166 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2179 scpnt->result |= fcp_rsp_iu->scsi_status;
2180
2181 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
2182 zfcp_fsf_req_latency(req);
2183
2184 zfcp_fsf_trace_latency(req);
2185
2186 if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
2187 if (fcp_rsp_info[3] == RSP_CODE_GOOD)
2188 set_host_byte(scpnt, DID_OK);
2189 else {
2190 set_host_byte(scpnt, DID_ERROR);
2191 goto skip_fsfstatus;
2192 }
2193 }
2194 2167
2195 if (unlikely(fcp_rsp_iu->validity.bits.fcp_sns_len_valid)) { 2168 zfcp_fsf_req_trace(req, scpnt);
2196 sns_len = FSF_FCP_RSP_SIZE - sizeof(struct fcp_rsp_iu) +
2197 fcp_rsp_iu->fcp_rsp_len;
2198 sns_len = min(sns_len, (u32) SCSI_SENSE_BUFFERSIZE);
2199 sns_len = min(sns_len, fcp_rsp_iu->fcp_sns_len);
2200 2169
2201 memcpy(scpnt->sense_buffer,
2202 zfcp_get_fcp_sns_info_ptr(fcp_rsp_iu), sns_len);
2203 }
2204
2205 if (unlikely(fcp_rsp_iu->validity.bits.fcp_resid_under)) {
2206 scsi_set_resid(scpnt, fcp_rsp_iu->fcp_resid);
2207 if (scsi_bufflen(scpnt) - scsi_get_resid(scpnt) <
2208 scpnt->underflow)
2209 set_host_byte(scpnt, DID_ERROR);
2210 }
2211skip_fsfstatus: 2170skip_fsfstatus:
2212 if (scpnt->result != 0) 2171 if (scpnt->result != 0)
2213 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req); 2172 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
@@ -2229,11 +2188,13 @@ skip_fsfstatus:
2229 2188
2230static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req) 2189static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2231{ 2190{
2232 struct fcp_rsp_iu *fcp_rsp_iu = (struct fcp_rsp_iu *) 2191 struct fcp_resp_with_ext *fcp_rsp;
2233 &(req->qtcb->bottom.io.fcp_rsp); 2192 struct fcp_resp_rsp_info *rsp_info;
2234 char *fcp_rsp_info = (unsigned char *) &fcp_rsp_iu[1];
2235 2193
2236 if ((fcp_rsp_info[3] != RSP_CODE_GOOD) || 2194 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2195 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2196
2197 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2237 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2198 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2238 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2199 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2239} 2200}
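[Editor's note] The &fcp_rsp[1] arithmetic here, as in zfcp_fc_eval_fcp_rsp() above, relies on the FCP-2 response layout, where the variable-length parts trail the fixed header:

/* FCP_RSP IU layout per FCP-2:
 *   struct fcp_resp_with_ext   fixed header plus extension
 *   struct fcp_resp_rsp_info   present if FCP_RSP_LEN_VAL (fr_rsp_len bytes)
 *   sense data                 present if FCP_SNS_LEN_VAL (fr_sns_len bytes)
 * so RSP info starts at &fcp_rsp[1], and sense data fr_rsp_len beyond it. */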
@@ -2293,13 +2254,11 @@ static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2293 break; 2254 break;
2294 case FSF_PORT_BOXED: 2255 case FSF_PORT_BOXED:
2295 zfcp_erp_port_boxed(unit->port, "fssfch5", req); 2256 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2296 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2257 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2297 ZFCP_STATUS_FSFREQ_RETRY;
2298 break; 2258 break;
2299 case FSF_LUN_BOXED: 2259 case FSF_LUN_BOXED:
2300 zfcp_erp_unit_boxed(unit, "fssfch6", req); 2260 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2301 req->status |= ZFCP_STATUS_FSFREQ_ERROR | 2261 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2302 ZFCP_STATUS_FSFREQ_RETRY;
2303 break; 2262 break;
2304 case FSF_ADAPTER_STATUS_AVAILABLE: 2263 case FSF_ADAPTER_STATUS_AVAILABLE:
2305 if (header->fsf_status_qual.word[0] == 2264 if (header->fsf_status_qual.word[0] ==
@@ -2314,24 +2273,10 @@ skip_fsfstatus:
2314 else { 2273 else {
2315 zfcp_fsf_send_fcp_command_task_handler(req); 2274 zfcp_fsf_send_fcp_command_task_handler(req);
2316 req->unit = NULL; 2275 req->unit = NULL;
2317 zfcp_unit_put(unit); 2276 put_device(&unit->sysfs_device);
2318 } 2277 }
2319} 2278}
2320 2279
2321static void zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, u32 fcp_dl)
2322{
2323 u32 *fcp_dl_ptr;
2324
2325 /*
2326 * fcp_dl_addr = start address of fcp_cmnd structure +
2327 * size of fixed part + size of dynamically sized add_dcp_cdb field
2328 * SEE FCP-2 documentation
2329 */
2330 fcp_dl_ptr = (u32 *) ((unsigned char *) &fcp_cmd[1] +
2331 (fcp_cmd->add_fcp_cdb_length << 2));
2332 *fcp_dl_ptr = fcp_dl;
2333}
2334
2335/** 2280/**
2336 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command) 2281 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2337 * @unit: unit where command is sent to 2282 * @unit: unit where command is sent to
@@ -2341,7 +2286,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2341 struct scsi_cmnd *scsi_cmnd) 2286 struct scsi_cmnd *scsi_cmnd)
2342{ 2287{
2343 struct zfcp_fsf_req *req; 2288 struct zfcp_fsf_req *req;
2344 struct fcp_cmnd_iu *fcp_cmnd_iu; 2289 struct fcp_cmnd *fcp_cmnd;
2345 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2290 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2346 int real_bytes, retval = -EIO; 2291 int real_bytes, retval = -EIO;
2347 struct zfcp_adapter *adapter = unit->port->adapter; 2292 struct zfcp_adapter *adapter = unit->port->adapter;
@@ -2366,23 +2311,21 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2366 } 2311 }
2367 2312
2368 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2313 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2369 zfcp_unit_get(unit); 2314 get_device(&unit->sysfs_device);
2370 req->unit = unit; 2315 req->unit = unit;
2371 req->data = scsi_cmnd; 2316 req->data = scsi_cmnd;
2372 req->handler = zfcp_fsf_send_fcp_command_handler; 2317 req->handler = zfcp_fsf_send_fcp_command_handler;
2373 req->qtcb->header.lun_handle = unit->handle; 2318 req->qtcb->header.lun_handle = unit->handle;
2374 req->qtcb->header.port_handle = unit->port->handle; 2319 req->qtcb->header.port_handle = unit->port->handle;
2375 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2320 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2321 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2376 2322
2377 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2323 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2378 2324
2379 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &(req->qtcb->bottom.io.fcp_cmnd);
2380 fcp_cmnd_iu->fcp_lun = unit->fcp_lun;
2381 /* 2325 /*
2382 * set depending on data direction: 2326 * set depending on data direction:
2383 * data direction bits in SBALE (SB Type) 2327 * data direction bits in SBALE (SB Type)
2384 * data direction bits in QTCB 2328 * data direction bits in QTCB
2385 * data direction bits in FCP_CMND IU
2386 */ 2329 */
2387 switch (scsi_cmnd->sc_data_direction) { 2330 switch (scsi_cmnd->sc_data_direction) {
2388 case DMA_NONE: 2331 case DMA_NONE:
@@ -2390,32 +2333,17 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2390 break; 2333 break;
2391 case DMA_FROM_DEVICE: 2334 case DMA_FROM_DEVICE:
2392 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; 2335 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2393 fcp_cmnd_iu->rddata = 1;
2394 break; 2336 break;
2395 case DMA_TO_DEVICE: 2337 case DMA_TO_DEVICE:
2396 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE; 2338 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2397 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2339 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2398 fcp_cmnd_iu->wddata = 1;
2399 break; 2340 break;
2400 case DMA_BIDIRECTIONAL: 2341 case DMA_BIDIRECTIONAL:
2401 goto failed_scsi_cmnd; 2342 goto failed_scsi_cmnd;
2402 } 2343 }
2403 2344
2404 if (likely((scsi_cmnd->device->simple_tags) || 2345 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2405 ((atomic_read(&unit->status) & ZFCP_STATUS_UNIT_READONLY) && 2346 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2406 (atomic_read(&unit->status) & ZFCP_STATUS_UNIT_SHARED))))
2407 fcp_cmnd_iu->task_attribute = SIMPLE_Q;
2408 else
2409 fcp_cmnd_iu->task_attribute = UNTAGGED;
2410
2411 if (unlikely(scsi_cmnd->cmd_len > FCP_CDB_LENGTH))
2412 fcp_cmnd_iu->add_fcp_cdb_length =
2413 (scsi_cmnd->cmd_len - FCP_CDB_LENGTH) >> 2;
2414
2415 memcpy(fcp_cmnd_iu->fcp_cdb, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
2416
2417 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
2418 fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
2419 2347
2420 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype, 2348 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2421 scsi_sglist(scsi_cmnd), 2349 scsi_sglist(scsi_cmnd),
@@ -2433,8 +2361,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2433 goto failed_scsi_cmnd; 2361 goto failed_scsi_cmnd;
2434 } 2362 }
2435 2363
2436 zfcp_set_fcp_dl(fcp_cmnd_iu, real_bytes);
2437
2438 retval = zfcp_fsf_req_send(req); 2364 retval = zfcp_fsf_req_send(req);
2439 if (unlikely(retval)) 2365 if (unlikely(retval))
2440 goto failed_scsi_cmnd; 2366 goto failed_scsi_cmnd;
@@ -2442,7 +2368,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2442 goto out; 2368 goto out;
2443 2369
2444failed_scsi_cmnd: 2370failed_scsi_cmnd:
2445 zfcp_unit_put(unit); 2371 put_device(&unit->sysfs_device);
2446 zfcp_fsf_req_free(req); 2372 zfcp_fsf_req_free(req);
2447 scsi_cmnd->host_scribble = NULL; 2373 scsi_cmnd->host_scribble = NULL;
2448out: 2374out:
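[Editor's note] The removed zfcp_set_fcp_dl() is no longer needed with the common struct fcp_cmnd: its CDB field has a fixed 16-byte size, so there is no variable add_fcp_cdb_length offset to compute, and zfcp_fc_scsi_to_fcp() already stores the data length:

/* inside zfcp_fc_scsi_to_fcp() (see zfcp_fc.h above):
 *   fcp->fc_dl = scsi_bufflen(scsi);   replaces zfcp_set_fcp_dl() */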
@@ -2460,7 +2386,7 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2460{ 2386{
2461 struct qdio_buffer_element *sbale; 2387 struct qdio_buffer_element *sbale;
2462 struct zfcp_fsf_req *req = NULL; 2388 struct zfcp_fsf_req *req = NULL;
2463 struct fcp_cmnd_iu *fcp_cmnd_iu; 2389 struct fcp_cmnd *fcp_cmnd;
2464 struct zfcp_qdio *qdio = unit->port->adapter->qdio; 2390 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
2465 2391
2466 if (unlikely(!(atomic_read(&unit->status) & 2392 if (unlikely(!(atomic_read(&unit->status) &
@@ -2486,16 +2412,14 @@ struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2486 req->qtcb->header.port_handle = unit->port->handle; 2412 req->qtcb->header.port_handle = unit->port->handle;
2487 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2413 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2488 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2414 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2489 req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) + 2415 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2490 sizeof(u32);
2491 2416
2492 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req); 2417 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2493 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE; 2418 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2494 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; 2419 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2495 2420
2496 fcp_cmnd_iu = (struct fcp_cmnd_iu *) &req->qtcb->bottom.io.fcp_cmnd; 2421 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2497 fcp_cmnd_iu->fcp_lun = unit->fcp_lun; 2422 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
2498 fcp_cmnd_iu->task_management_flags = tm_flags;
2499 2423
2500 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); 2424 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2501 if (!zfcp_fsf_req_send(req)) 2425 if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index dcc7c1dbcf58..b3de682b64cf 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/pfn.h> 12#include <linux/pfn.h>
13#include <linux/scatterlist.h> 13#include <linux/scatterlist.h>
14#include <scsi/libfc.h>
14 15
15#define FSF_QTCB_CURRENT_VERSION 0x00000001 16#define FSF_QTCB_CURRENT_VERSION 0x00000001
16 17
@@ -228,7 +229,8 @@ struct fsf_status_read_buffer {
228 u32 length; 229 u32 length;
229 u32 res1; 230 u32 res1;
230 struct fsf_queue_designator queue_designator; 231 struct fsf_queue_designator queue_designator;
231 u32 d_id; 232 u8 res2;
233 u8 d_id[3];
232 u32 class; 234 u32 class;
233 u64 fcp_lun; 235 u64 fcp_lun;
234 u8 res3[24]; 236 u8 res3[24];
@@ -309,22 +311,7 @@ struct fsf_qtcb_header {
309 u8 res4[16]; 311 u8 res4[16];
310} __attribute__ ((packed)); 312} __attribute__ ((packed));
311 313
312struct fsf_nport_serv_param {
313 u8 common_serv_param[16];
314 u64 wwpn;
315 u64 wwnn;
316 u8 class1_serv_param[16];
317 u8 class2_serv_param[16];
318 u8 class3_serv_param[16];
319 u8 class4_serv_param[16];
320 u8 vendor_version_level[16];
321} __attribute__ ((packed));
322
323#define FSF_PLOGI_MIN_LEN 112 314#define FSF_PLOGI_MIN_LEN 112
324struct fsf_plogi {
325 u32 code;
326 struct fsf_nport_serv_param serv_param;
327} __attribute__ ((packed));
328 315
329#define FSF_FCP_CMND_SIZE 288 316#define FSF_FCP_CMND_SIZE 288
330#define FSF_FCP_RSP_SIZE 128 317#define FSF_FCP_RSP_SIZE 128
@@ -342,8 +329,8 @@ struct fsf_qtcb_bottom_io {
342 329
343struct fsf_qtcb_bottom_support { 330struct fsf_qtcb_bottom_support {
344 u32 operation_subtype; 331 u32 operation_subtype;
345 u8 res1[12]; 332 u8 res1[13];
346 u32 d_id; 333 u8 d_id[3];
347 u32 option; 334 u32 option;
348 u64 fcp_lun; 335 u64 fcp_lun;
349 u64 res2; 336 u64 res2;
@@ -372,18 +359,18 @@ struct fsf_qtcb_bottom_config {
372 u32 fc_topology; 359 u32 fc_topology;
373 u32 fc_link_speed; 360 u32 fc_link_speed;
374 u32 adapter_type; 361 u32 adapter_type;
375 u32 peer_d_id; 362 u8 res0;
363 u8 peer_d_id[3];
376 u8 res1[2]; 364 u8 res1[2];
377 u16 timer_interval; 365 u16 timer_interval;
378 u8 res2[8]; 366 u8 res2[9];
379 u32 s_id; 367 u8 s_id[3];
380 struct fsf_nport_serv_param nport_serv_param; 368 u8 nport_serv_param[128];
381 u8 reserved_nport_serv_param[16];
382 u8 res3[8]; 369 u8 res3[8];
383 u32 adapter_ports; 370 u32 adapter_ports;
384 u32 hardware_version; 371 u32 hardware_version;
385 u8 serial_number[32]; 372 u8 serial_number[32];
386 struct fsf_nport_serv_param plogi_payload; 373 u8 plogi_payload[112];
387 struct fsf_statistics_info stat_info; 374 struct fsf_statistics_info stat_info;
388 u8 res4[112]; 375 u8 res4[112];
389} __attribute__ ((packed)); 376} __attribute__ ((packed));
@@ -450,4 +437,22 @@ struct zfcp_blk_drv_data {
450 u64 fabric_lat; 437 u64 fabric_lat;
451} __attribute__ ((packed)); 438} __attribute__ ((packed));
452 439
440/**
441 * struct zfcp_fsf_ct_els - zfcp data for ct or els request
442 * @req: scatter-gather list for request
443 * @resp: scatter-gather list for response
444 * @handler: handler function (called for response to the request)
445 * @handler_data: data passed to handler function
446 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
447 * @status: used to pass error status to calling function
448 */
449struct zfcp_fsf_ct_els {
450 struct scatterlist *req;
451 struct scatterlist *resp;
452 void (*handler)(void *);
453 void *handler_data;
454 struct zfcp_port *port;
455 int status;
456};
457
453#endif /* FSF_H */ 458#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 0e1a34627a2e..771cc536a989 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,29 +9,33 @@
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/types.h>
13#include <scsi/fc/fc_fcp.h>
12#include <asm/atomic.h> 14#include <asm/atomic.h>
13#include "zfcp_ext.h" 15#include "zfcp_ext.h"
14#include "zfcp_dbf.h" 16#include "zfcp_dbf.h"
17#include "zfcp_fc.h"
15 18
16static unsigned int default_depth = 32; 19static unsigned int default_depth = 32;
17module_param_named(queue_depth, default_depth, uint, 0600); 20module_param_named(queue_depth, default_depth, uint, 0600);
18MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 21MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
19 22
20/* Find start of Sense Information in FCP response unit*/ 23static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
21char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) 24 int reason)
22{ 25{
23 char *fcp_sns_info_ptr; 26 switch (reason) {
24 27 case SCSI_QDEPTH_DEFAULT:
25 fcp_sns_info_ptr = (unsigned char *) &fcp_rsp_iu[1]; 28 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
26 if (fcp_rsp_iu->validity.bits.fcp_rsp_len_valid) 29 break;
27 fcp_sns_info_ptr += fcp_rsp_iu->fcp_rsp_len; 30 case SCSI_QDEPTH_QFULL:
28 31 scsi_track_queue_full(sdev, depth);
29 return fcp_sns_info_ptr; 32 break;
30} 33 case SCSI_QDEPTH_RAMP_UP:
31 34 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
32static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) 35 break;
33{ 36 default:
34 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); 37 return -EOPNOTSUPP;
38 }
35 return sdev->queue_depth; 39 return sdev->queue_depth;
36} 40}
37 41
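[Editor's note] The reason codes in the new change_queue_depth callback come from the SCSI midlayer of this kernel series: sysfs queue_depth writes arrive as SCSI_QDEPTH_DEFAULT, QUEUE FULL handling as SCSI_QDEPTH_QFULL, and the ramp-up path as SCSI_QDEPTH_RAMP_UP. A hypothetical invocation, for illustration only:

/* midlayer-style call through the host template */
sdev->host->hostt->change_queue_depth(sdev, 32, SCSI_QDEPTH_DEFAULT);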
@@ -39,7 +43,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
39{ 43{
40 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 44 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
41 unit->device = NULL; 45 unit->device = NULL;
42 zfcp_unit_put(unit); 46 put_device(&unit->sysfs_device);
43} 47}
44 48
45static int zfcp_scsi_slave_configure(struct scsi_device *sdp) 49static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -99,12 +103,26 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
99 } 103 }
100 104
101 status = atomic_read(&unit->status); 105 status = atomic_read(&unit->status);
102 if (unlikely((status & ZFCP_STATUS_COMMON_ERP_FAILED) || 106 if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
103 !(status & ZFCP_STATUS_COMMON_RUNNING))) { 107 !(atomic_read(&unit->port->status) &
108 ZFCP_STATUS_COMMON_ERP_FAILED)) {
109 /* only the unit failed (e.g. access denied) while the port
110 * is fine; the FC transport does not cover this, fail here */
104 zfcp_scsi_command_fail(scpnt, DID_ERROR); 111 zfcp_scsi_command_fail(scpnt, DID_ERROR);
105 return 0; 112 return 0;
106 } 113 }
107 114
115 if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
116 /* This could be either
 117 * an open unit pending: temporary, ends in an open
 118 * unit or in ERP_FAILED, so retry the command
 119 * a call to rport_delete pending: mimic the retry of
 120 * fc_remote_port_chkready until the rport is BLOCKED
121 */
122 zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
123 return 0;
124 }
125
108 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt); 126 ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
109 if (unlikely(ret == -EBUSY)) 127 if (unlikely(ret == -EBUSY))
110 return SCSI_MLQUEUE_DEVICE_BUSY; 128 return SCSI_MLQUEUE_DEVICE_BUSY;
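
A hedged restatement of the new gating order in queuecommand; the helper name and the plain return values are illustrative, while the flag logic and host-byte codes come from the hunk itself:

/* Illustrative helper; ZFCP_STATUS_* and DID_* as used above. */
static int example_gate(struct zfcp_unit *unit)
{
	int ustatus = atomic_read(&unit->status);
	int pstatus = atomic_read(&unit->port->status);

	if ((ustatus & ZFCP_STATUS_COMMON_ERP_FAILED) &&
	    !(pstatus & ZFCP_STATUS_COMMON_ERP_FAILED))
		/* unit failed but port fine: the FC transport cannot
		 * see this, so the LLD must fail the command itself */
		return DID_ERROR;

	if (!(ustatus & ZFCP_STATUS_COMMON_UNBLOCKED))
		/* open-unit or rport_delete still pending: have the
		 * midlayer retry until the state settles */
		return DID_IMM_RETRY;

	return 0;	/* safe to issue the FCP command */
}
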
@@ -115,49 +133,44 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
115} 133}
116 134
117static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter, 135static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
118 int channel, unsigned int id, 136 unsigned int id, u64 lun)
119 unsigned int lun)
120{ 137{
138 unsigned long flags;
121 struct zfcp_port *port; 139 struct zfcp_port *port;
122 struct zfcp_unit *unit; 140 struct zfcp_unit *unit = NULL;
123 int scsi_lun;
124 141
125 list_for_each_entry(port, &adapter->port_list_head, list) { 142 read_lock_irqsave(&adapter->port_list_lock, flags);
143 list_for_each_entry(port, &adapter->port_list, list) {
126 if (!port->rport || (id != port->rport->scsi_target_id)) 144 if (!port->rport || (id != port->rport->scsi_target_id))
127 continue; 145 continue;
128 list_for_each_entry(unit, &port->unit_list_head, list) { 146 unit = zfcp_get_unit_by_lun(port, lun);
129 scsi_lun = scsilun_to_int( 147 if (unit)
130 (struct scsi_lun *)&unit->fcp_lun); 148 break;
131 if (lun == scsi_lun)
132 return unit;
133 }
134 } 149 }
150 read_unlock_irqrestore(&adapter->port_list_lock, flags);
135 151
136 return NULL; 152 return unit;
137} 153}
138 154
139static int zfcp_scsi_slave_alloc(struct scsi_device *sdp) 155static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
140{ 156{
141 struct zfcp_adapter *adapter; 157 struct zfcp_adapter *adapter;
142 struct zfcp_unit *unit; 158 struct zfcp_unit *unit;
143 unsigned long flags; 159 u64 lun;
144 int retval = -ENXIO;
145 160
146 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0]; 161 adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
147 if (!adapter) 162 if (!adapter)
148 goto out; 163 goto out;
149 164
150 read_lock_irqsave(&zfcp_data.config_lock, flags); 165 int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
151 unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun); 166 unit = zfcp_unit_lookup(adapter, sdp->id, lun);
152 if (unit) { 167 if (unit) {
153 sdp->hostdata = unit; 168 sdp->hostdata = unit;
154 unit->device = sdp; 169 unit->device = sdp;
155 zfcp_unit_get(unit); 170 return 0;
156 retval = 0;
157 } 171 }
158 read_unlock_irqrestore(&zfcp_data.config_lock, flags);
159out: 172out:
160 return retval; 173 return -ENXIO;
161} 174}
162 175
163static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 176static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
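
The lookup above also shows the locking scheme that replaces the global zfcp_data.config_lock: each adapter now protects its own port_list with port_list_lock. A minimal sketch of the reader side, with an illustrative function name:

#include <linux/list.h>
#include <linux/spinlock.h>

static struct zfcp_port *find_port_by_target_id(struct zfcp_adapter *adapter,
						unsigned int id)
{
	struct zfcp_port *port, *found = NULL;
	unsigned long flags;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->rport && port->rport->scsi_target_id == id) {
			found = port;
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);

	return found;	/* NULL if no rport matched */
}
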
@@ -196,6 +209,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
196 break; 209 break;
197 210
198 zfcp_erp_wait(adapter); 211 zfcp_erp_wait(adapter);
212 fc_block_scsi_eh(scpnt);
199 if (!(atomic_read(&adapter->status) & 213 if (!(atomic_read(&adapter->status) &
200 ZFCP_STATUS_COMMON_RUNNING)) { 214 ZFCP_STATUS_COMMON_RUNNING)) {
201 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL, 215 zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
@@ -235,6 +249,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
235 break; 249 break;
236 250
237 zfcp_erp_wait(adapter); 251 zfcp_erp_wait(adapter);
252 fc_block_scsi_eh(scpnt);
238 if (!(atomic_read(&adapter->status) & 253 if (!(atomic_read(&adapter->status) &
239 ZFCP_STATUS_COMMON_RUNNING)) { 254 ZFCP_STATUS_COMMON_RUNNING)) {
240 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt); 255 zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
@@ -249,9 +264,6 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
249 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { 264 if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
250 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt); 265 zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
251 retval = FAILED; 266 retval = FAILED;
252 } else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
253 zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
254 retval = FAILED;
255 } else 267 } else
256 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt); 268 zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
257 269
@@ -261,12 +273,12 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
261 273
262static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 274static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
263{ 275{
264 return zfcp_task_mgmt_function(scpnt, FCP_LOGICAL_UNIT_RESET); 276 return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
265} 277}
266 278
267static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt) 279static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
268{ 280{
269 return zfcp_task_mgmt_function(scpnt, FCP_TARGET_RESET); 281 return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
270} 282}
271 283
272static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 284static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
@@ -276,6 +288,7 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
276 288
277 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt); 289 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
278 zfcp_erp_wait(adapter); 290 zfcp_erp_wait(adapter);
291 fc_block_scsi_eh(scpnt);
279 292
280 return SUCCESS; 293 return SUCCESS;
281} 294}
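
All three error handlers now insert fc_block_scsi_eh() between zfcp_erp_wait() and the adapter-status check, so SCSI EH does not proceed while the rport is still blocked. A skeleton of the shared ordering; the function name and the extra adapter argument are illustrative:

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_fc.h>

static int example_eh_handler(struct scsi_cmnd *scpnt,
			      struct zfcp_adapter *adapter)
{
	zfcp_erp_wait(adapter);		/* 1. let zfcp recovery settle */
	fc_block_scsi_eh(scpnt);	/* 2. then wait until the rport is
					 *    unblocked or dev_loss_tmo fires */

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING))
		return FAILED;		/* recovery left the adapter down */

	return SUCCESS;
}
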
@@ -303,7 +316,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
303 adapter->scsi_host->max_lun = 1; 316 adapter->scsi_host->max_lun = 1;
304 adapter->scsi_host->max_channel = 0; 317 adapter->scsi_host->max_channel = 0;
305 adapter->scsi_host->unique_id = dev_id.devno; 318 adapter->scsi_host->unique_id = dev_id.devno;
306 adapter->scsi_host->max_cmd_len = 255; 319 adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
307 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; 320 adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
308 321
309 adapter->scsi_host->hostdata[0] = (unsigned long) adapter; 322 adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
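
The max_cmd_len change from 255 to 16 matches the fixed-size CDB field of the FCP_CMND IU defined in <scsi/fc/fc_fcp.h>, which this file now includes (the FCP_TMF_* flags used below come from the same header). Abbreviated layout, from that header as best recalled:

struct fcp_cmnd {
	__u8	fc_lun[8];	/* logical unit number */
	__u8	fc_cmdref;	/* command reference number */
	__u8	fc_pri_ta;	/* priority and task attribute */
	__u8	fc_tm_flags;	/* task management flags (FCP_TMF_*) */
	__u8	fc_flags;	/* additional length and flags */
	__u8	fc_cdb[16];	/* SCSI CDB: fixed 16 bytes, hence
				 * max_cmd_len = 16 */
	__be32	fc_dl;		/* data length */
};
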
@@ -325,12 +338,11 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
325 if (!shost) 338 if (!shost)
326 return; 339 return;
327 340
328 read_lock_irq(&zfcp_data.config_lock); 341 read_lock_irq(&adapter->port_list_lock);
329 list_for_each_entry(port, &adapter->port_list_head, list) 342 list_for_each_entry(port, &adapter->port_list, list)
330 if (port->rport) 343 port->rport = NULL;
331 port->rport = NULL; 344 read_unlock_irq(&adapter->port_list_lock);
332 345
333 read_unlock_irq(&zfcp_data.config_lock);
334 fc_remove_host(shost); 346 fc_remove_host(shost);
335 scsi_remove_host(shost); 347 scsi_remove_host(shost);
336 scsi_host_put(shost); 348 scsi_host_put(shost);
@@ -348,7 +360,7 @@ zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
348 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL); 360 fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
349 if (!fc_stats) 361 if (!fc_stats)
350 return NULL; 362 return NULL;
 351 adapter->fc_stats = fc_stats; /* freed in adapter_dequeue */ 363 adapter->fc_stats = fc_stats; /* freed in adapter_release */
352 } 364 }
353 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats)); 365 memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
354 return adapter->fc_stats; 366 return adapter->fc_stats;
@@ -464,7 +476,7 @@ static void zfcp_reset_fc_host_stats(struct Scsi_Host *shost)
464 adapter->stats_reset = jiffies/HZ; 476 adapter->stats_reset = jiffies/HZ;
465 kfree(adapter->stats_reset_data); 477 kfree(adapter->stats_reset_data);
466 adapter->stats_reset_data = data; /* finally freed in 478 adapter->stats_reset_data = data; /* finally freed in
467 adapter_dequeue */ 479 adapter_release */
468 } 480 }
469} 481}
470 482
@@ -495,7 +507,7 @@ static void zfcp_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
 495 * @rport: The FC rport where to terminate I/O 507 * @rport: The FC rport where to terminate I/O
496 * 508 *
497 * Abort all pending SCSI commands for a port by closing the 509 * Abort all pending SCSI commands for a port by closing the
 498 * port. Using a reopen for avoids a conflict with a shutdown 510 * port. Using a reopen avoids a conflict with a shutdown
499 * overwriting a reopen. 511 * overwriting a reopen.
500 */ 512 */
501static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport) 513static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
@@ -505,15 +517,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
505 struct zfcp_adapter *adapter = 517 struct zfcp_adapter *adapter =
506 (struct zfcp_adapter *)shost->hostdata[0]; 518 (struct zfcp_adapter *)shost->hostdata[0];
507 519
508 write_lock_irq(&zfcp_data.config_lock);
509 port = zfcp_get_port_by_wwpn(adapter, rport->port_name); 520 port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
510 if (port)
511 zfcp_port_get(port);
512 write_unlock_irq(&zfcp_data.config_lock);
513 521
514 if (port) { 522 if (port) {
515 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL); 523 zfcp_erp_port_reopen(port, 0, "sctrpi1", NULL);
516 zfcp_port_put(port); 524 put_device(&port->sysfs_device);
517 } 525 }
518} 526}
519 527
@@ -555,31 +563,34 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
555 563
556void zfcp_scsi_schedule_rport_register(struct zfcp_port *port) 564void zfcp_scsi_schedule_rport_register(struct zfcp_port *port)
557{ 565{
558 zfcp_port_get(port); 566 get_device(&port->sysfs_device);
559 port->rport_task = RPORT_ADD; 567 port->rport_task = RPORT_ADD;
560 568
561 if (!queue_work(port->adapter->work_queue, &port->rport_work)) 569 if (!queue_work(port->adapter->work_queue, &port->rport_work))
562 zfcp_port_put(port); 570 put_device(&port->sysfs_device);
563} 571}
564 572
565void zfcp_scsi_schedule_rport_block(struct zfcp_port *port) 573void zfcp_scsi_schedule_rport_block(struct zfcp_port *port)
566{ 574{
567 zfcp_port_get(port); 575 get_device(&port->sysfs_device);
568 port->rport_task = RPORT_DEL; 576 port->rport_task = RPORT_DEL;
569 577
570 if (port->rport && queue_work(port->adapter->work_queue, 578 if (port->rport && queue_work(port->adapter->work_queue,
571 &port->rport_work)) 579 &port->rport_work))
572 return; 580 return;
573 581
574 zfcp_port_put(port); 582 put_device(&port->sysfs_device);
575} 583}
576 584
577void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter) 585void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
578{ 586{
587 unsigned long flags;
579 struct zfcp_port *port; 588 struct zfcp_port *port;
580 589
581 list_for_each_entry(port, &adapter->port_list_head, list) 590 read_lock_irqsave(&adapter->port_list_lock, flags);
591 list_for_each_entry(port, &adapter->port_list, list)
582 zfcp_scsi_schedule_rport_block(port); 592 zfcp_scsi_schedule_rport_block(port);
593 read_unlock_irqrestore(&adapter->port_list_lock, flags);
583} 594}
584 595
585void zfcp_scsi_rport_work(struct work_struct *work) 596void zfcp_scsi_rport_work(struct work_struct *work)
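
The two scheduling helpers above follow one pattern now that zfcp_port_get/put are gone: pin the embedded struct device before queueing, drop the reference immediately if queue_work() reports the item was already pending, and otherwise let the work function drop it. A generic sketch:

#include <linux/device.h>
#include <linux/workqueue.h>

static void schedule_pinned_work(struct device *dev,
				 struct workqueue_struct *wq,
				 struct work_struct *work)
{
	get_device(dev);		/* pin: work may outlive the caller */
	if (!queue_work(wq, work))	/* 0: already queued, our reference */
		put_device(dev);	/*    was not handed over, drop it */
}

/* ...and the work function ends with put_device(), as
 * zfcp_scsi_rport_work() does above. */
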
@@ -597,7 +608,7 @@ void zfcp_scsi_rport_work(struct work_struct *work)
597 } 608 }
598 } 609 }
599 610
600 zfcp_port_put(port); 611 put_device(&port->sysfs_device);
601} 612}
602 613
603 614
@@ -615,21 +626,7 @@ void zfcp_scsi_scan(struct work_struct *work)
615 scsilun_to_int((struct scsi_lun *) 626 scsilun_to_int((struct scsi_lun *)
616 &unit->fcp_lun), 0); 627 &unit->fcp_lun), 0);
617 628
618 zfcp_unit_put(unit); 629 put_device(&unit->sysfs_device);
619}
620
621static int zfcp_execute_fc_job(struct fc_bsg_job *job)
622{
623 switch (job->request->msgcode) {
624 case FC_BSG_RPT_ELS:
625 case FC_BSG_HST_ELS_NOLOGIN:
626 return zfcp_fc_execute_els_fc_job(job);
627 case FC_BSG_RPT_CT:
628 case FC_BSG_HST_CT:
629 return zfcp_fc_execute_ct_fc_job(job);
630 default:
631 return -EINVAL;
632 }
633} 630}
634 631
635struct fc_function_template zfcp_transport_functions = { 632struct fc_function_template zfcp_transport_functions = {
@@ -643,6 +640,7 @@ struct fc_function_template zfcp_transport_functions = {
643 .show_host_port_name = 1, 640 .show_host_port_name = 1,
644 .show_host_permanent_port_name = 1, 641 .show_host_permanent_port_name = 1,
645 .show_host_supported_classes = 1, 642 .show_host_supported_classes = 1,
643 .show_host_supported_fc4s = 1,
646 .show_host_supported_speeds = 1, 644 .show_host_supported_speeds = 1,
647 .show_host_maxframe_size = 1, 645 .show_host_maxframe_size = 1,
648 .show_host_serial_number = 1, 646 .show_host_serial_number = 1,
@@ -652,13 +650,15 @@ struct fc_function_template zfcp_transport_functions = {
652 .get_host_port_state = zfcp_get_host_port_state, 650 .get_host_port_state = zfcp_get_host_port_state,
653 .terminate_rport_io = zfcp_scsi_terminate_rport_io, 651 .terminate_rport_io = zfcp_scsi_terminate_rport_io,
654 .show_host_port_state = 1, 652 .show_host_port_state = 1,
655 .bsg_request = zfcp_execute_fc_job, 653 .show_host_active_fc4s = 1,
654 .bsg_request = zfcp_fc_exec_bsg_job,
656 /* no functions registered for following dynamic attributes but 655 /* no functions registered for following dynamic attributes but
657 directly set by LLDD */ 656 directly set by LLDD */
658 .show_host_port_type = 1, 657 .show_host_port_type = 1,
659 .show_host_speed = 1, 658 .show_host_speed = 1,
660 .show_host_port_id = 1, 659 .show_host_port_id = 1,
661 .disable_target_scan = 1, 660 .disable_target_scan = 1,
661 .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
662}; 662};
663 663
664struct zfcp_data zfcp_data = { 664struct zfcp_data zfcp_data = {
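
Setting .dd_bsg_size makes the FC transport preallocate that many bytes of driver-private storage per bsg job, reachable through job->dd_data; together with routing .bsg_request to zfcp_fc_exec_bsg_job, this is what lets the msgcode switch move out of this file. A hedged sketch of how such a handler can use the area (handler name illustrative):

#include <scsi/scsi_transport_fc.h>

static int example_exec_bsg_job(struct fc_bsg_job *job)
{
	/* per-job LLD area, sized by .dd_bsg_size above */
	struct zfcp_fsf_ct_els *els = job->dd_data;

	els->req = job->request_payload.sg_list;
	els->resp = job->reply_payload.sg_list;
	/* then dispatch on job->request->msgcode, as the removed
	 * zfcp_execute_fc_job() did for ELS and CT requests */
	return 0;
}
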
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 079a8cf518a3..f539e006683c 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * sysfs attributes. 4 * sysfs attributes.
5 * 5 *
6 * Copyright IBM Corporation 2008 6 * Copyright IBM Corporation 2008, 2009
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -19,30 +19,44 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
19 struct device_attribute *at,\ 19 struct device_attribute *at,\
20 char *buf) \ 20 char *buf) \
21{ \ 21{ \
22 struct _feat_def *_feat = dev_get_drvdata(dev); \ 22 struct _feat_def *_feat = container_of(dev, struct _feat_def, \
23 sysfs_device); \
23 \ 24 \
24 return sprintf(buf, _format, _value); \ 25 return sprintf(buf, _format, _value); \
25} \ 26} \
26static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
27 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
28 29
29ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, status, "0x%08x\n", 30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
30 atomic_read(&adapter->status)); 31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
31ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwnn, "0x%016llx\n", 32 struct device_attribute *at,\
32 (unsigned long long) adapter->peer_wwnn); 33 char *buf) \
33ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_wwpn, "0x%016llx\n", 34{ \
34 (unsigned long long) adapter->peer_wwpn); 35 struct ccw_device *cdev = to_ccwdev(dev); \
35ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, peer_d_id, "0x%06x\n", 36 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); \
36 adapter->peer_d_id); 37 int i; \
37ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, card_version, "0x%04x\n", 38 \
38 adapter->hydra_version); 39 if (!adapter) \
39ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, lic_version, "0x%08x\n", 40 return -ENODEV; \
40 adapter->fsf_lic_version); 41 \
41ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, hardware_version, "0x%08x\n", 42 i = sprintf(buf, _format, _value); \
42 adapter->hardware_version); 43 zfcp_ccw_adapter_put(adapter); \
43ZFCP_DEFINE_ATTR(zfcp_adapter, adapter, in_recovery, "%d\n", 44 return i; \
44 (atomic_read(&adapter->status) & 45} \
45 ZFCP_STATUS_COMMON_ERP_INUSE) != 0); 46static ZFCP_DEV_ATTR(adapter, _name, S_IRUGO, \
47 zfcp_sysfs_adapter_##_name##_show, NULL);
48
49ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", atomic_read(&adapter->status));
50ZFCP_DEFINE_A_ATTR(peer_wwnn, "0x%016llx\n",
51 (unsigned long long) adapter->peer_wwnn);
52ZFCP_DEFINE_A_ATTR(peer_wwpn, "0x%016llx\n",
53 (unsigned long long) adapter->peer_wwpn);
54ZFCP_DEFINE_A_ATTR(peer_d_id, "0x%06x\n", adapter->peer_d_id);
55ZFCP_DEFINE_A_ATTR(card_version, "0x%04x\n", adapter->hydra_version);
56ZFCP_DEFINE_A_ATTR(lic_version, "0x%08x\n", adapter->fsf_lic_version);
57ZFCP_DEFINE_A_ATTR(hardware_version, "0x%08x\n", adapter->hardware_version);
58ZFCP_DEFINE_A_ATTR(in_recovery, "%d\n", (atomic_read(&adapter->status) &
59 ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
46 60
47ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n", 61ZFCP_DEFINE_ATTR(zfcp_port, port, status, "0x%08x\n",
48 atomic_read(&port->status)); 62 atomic_read(&port->status));
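
For one attribute, the new ZFCP_DEFINE_A_ATTR macro expands to roughly the following, which shows the reference discipline it encodes: the ccw-device lookup pins the adapter, and the reference is dropped only after formatting the value:

/* Rough expansion of ZFCP_DEFINE_A_ATTR(status, "0x%08x\n", ...). */
static ssize_t zfcp_sysfs_adapter_status_show(struct device *dev,
					      struct device_attribute *at,
					      char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
	int i;

	if (!adapter)			/* adapter already gone */
		return -ENODEV;

	i = sprintf(buf, "0x%08x\n", atomic_read(&adapter->status));
	zfcp_ccw_adapter_put(adapter);	/* drop the lookup reference */
	return i;
}
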
@@ -73,7 +87,8 @@ static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev, \
73 struct device_attribute *attr, \ 87 struct device_attribute *attr, \
74 char *buf) \ 88 char *buf) \
75{ \ 89{ \
76 struct _feat_def *_feat = dev_get_drvdata(dev); \ 90 struct _feat_def *_feat = container_of(dev, struct _feat_def, \
91 sysfs_device); \
77 \ 92 \
78 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \ 93 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED) \
79 return sprintf(buf, "1\n"); \ 94 return sprintf(buf, "1\n"); \
@@ -84,15 +99,13 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
84 struct device_attribute *attr,\ 99 struct device_attribute *attr,\
85 const char *buf, size_t count)\ 100 const char *buf, size_t count)\
86{ \ 101{ \
87 struct _feat_def *_feat = dev_get_drvdata(dev); \ 102 struct _feat_def *_feat = container_of(dev, struct _feat_def, \
103 sysfs_device); \
88 unsigned long val; \ 104 unsigned long val; \
89 int retval = 0; \ 105 int retval = 0; \
90 \ 106 \
91 mutex_lock(&zfcp_data.config_mutex); \ 107 if (!(_feat && get_device(&_feat->sysfs_device))) \
92 if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) { \ 108 return -EBUSY; \
93 retval = -EBUSY; \
94 goto out; \
95 } \
96 \ 109 \
97 if (strict_strtoul(buf, 0, &val) || val != 0) { \ 110 if (strict_strtoul(buf, 0, &val) || val != 0) { \
98 retval = -EINVAL; \ 111 retval = -EINVAL; \
@@ -105,29 +118,82 @@ static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev, \
105 _reopen_id, NULL); \ 118 _reopen_id, NULL); \
106 zfcp_erp_wait(_adapter); \ 119 zfcp_erp_wait(_adapter); \
107out: \ 120out: \
108 mutex_unlock(&zfcp_data.config_mutex); \ 121 put_device(&_feat->sysfs_device); \
109 return retval ? retval : (ssize_t) count; \ 122 return retval ? retval : (ssize_t) count; \
110} \ 123} \
111static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \ 124static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO, \
112 zfcp_sysfs_##_feat##_failed_show, \ 125 zfcp_sysfs_##_feat##_failed_show, \
113 zfcp_sysfs_##_feat##_failed_store); 126 zfcp_sysfs_##_feat##_failed_store);
114 127
115ZFCP_SYSFS_FAILED(zfcp_adapter, adapter, adapter, "syafai1", "syafai2");
116ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2"); 128ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
117ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2"); 129ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
118 130
131static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
132 struct device_attribute *attr,
133 char *buf)
134{
135 struct ccw_device *cdev = to_ccwdev(dev);
136 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
137 int i;
138
139 if (!adapter)
140 return -ENODEV;
141
142 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
143 i = sprintf(buf, "1\n");
144 else
145 i = sprintf(buf, "0\n");
146
147 zfcp_ccw_adapter_put(adapter);
148 return i;
149}
150
151static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
152 struct device_attribute *attr,
153 const char *buf, size_t count)
154{
155 struct ccw_device *cdev = to_ccwdev(dev);
156 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
157 unsigned long val;
158 int retval = 0;
159
160 if (!adapter)
161 return -ENODEV;
162
163 if (strict_strtoul(buf, 0, &val) || val != 0) {
164 retval = -EINVAL;
165 goto out;
166 }
167
168 zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
169 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
170 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
171 "syafai2", NULL);
172 zfcp_erp_wait(adapter);
173out:
174 zfcp_ccw_adapter_put(adapter);
175 return retval ? retval : (ssize_t) count;
176}
177static ZFCP_DEV_ATTR(adapter, failed, S_IWUSR | S_IRUGO,
178 zfcp_sysfs_adapter_failed_show,
179 zfcp_sysfs_adapter_failed_store);
180
119static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev, 181static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
120 struct device_attribute *attr, 182 struct device_attribute *attr,
121 const char *buf, size_t count) 183 const char *buf, size_t count)
122{ 184{
123 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 185 struct ccw_device *cdev = to_ccwdev(dev);
124 int ret; 186 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
125 187
126 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) 188 if (!adapter)
127 return -EBUSY; 189 return -ENODEV;
190
 191 /* synchronize the user-space invocation of scan_work with the kernel one */
192 queue_work(adapter->work_queue, &adapter->scan_work);
193 flush_work(&adapter->scan_work);
194 zfcp_ccw_adapter_put(adapter);
128 195
129 ret = zfcp_fc_scan_ports(adapter); 196 return (ssize_t) count;
130 return ret ? ret : (ssize_t) count;
131} 197}
132static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL, 198static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
133 zfcp_sysfs_port_rescan_store); 199 zfcp_sysfs_port_rescan_store);
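
The rescan store uses a small idiom worth naming: queue the same work_struct the kernel uses internally, then flush it, so a write to the sysfs file returns only after the scan ran, and concurrent triggers coalesce onto one execution. Generic form, with hypothetical names:

#include <linux/device.h>
#include <linux/workqueue.h>

struct my_obj {				/* hypothetical owner */
	struct workqueue_struct *wq;
	struct work_struct work;
};

static ssize_t trigger_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct my_obj *obj = dev_get_drvdata(dev);

	queue_work(obj->wq, &obj->work);	/* coalesces with kernel use */
	flush_work(&obj->work);			/* block until it has run */
	return count;
}
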
@@ -136,44 +202,34 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
136 struct device_attribute *attr, 202 struct device_attribute *attr,
137 const char *buf, size_t count) 203 const char *buf, size_t count)
138{ 204{
139 struct zfcp_adapter *adapter = dev_get_drvdata(dev); 205 struct ccw_device *cdev = to_ccwdev(dev);
206 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
140 struct zfcp_port *port; 207 struct zfcp_port *port;
141 u64 wwpn; 208 u64 wwpn;
142 int retval = 0; 209 int retval = -EINVAL;
143 LIST_HEAD(port_remove_lh);
144 210
145 mutex_lock(&zfcp_data.config_mutex); 211 if (!adapter)
146 if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) { 212 return -ENODEV;
147 retval = -EBUSY;
148 goto out;
149 }
150 213
151 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn)) { 214 if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
152 retval = -EINVAL;
153 goto out; 215 goto out;
154 }
155 216
156 write_lock_irq(&zfcp_data.config_lock);
157 port = zfcp_get_port_by_wwpn(adapter, wwpn); 217 port = zfcp_get_port_by_wwpn(adapter, wwpn);
158 if (port && (atomic_read(&port->refcount) == 0)) { 218 if (!port)
159 zfcp_port_get(port);
160 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &port->status);
161 list_move(&port->list, &port_remove_lh);
162 } else
163 port = NULL;
164 write_unlock_irq(&zfcp_data.config_lock);
165
166 if (!port) {
167 retval = -ENXIO;
168 goto out; 219 goto out;
169 } 220 else
221 retval = 0;
222
223 write_lock_irq(&adapter->port_list_lock);
224 list_del(&port->list);
225 write_unlock_irq(&adapter->port_list_lock);
226
227 put_device(&port->sysfs_device);
170 228
171 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL); 229 zfcp_erp_port_shutdown(port, 0, "syprs_1", NULL);
172 zfcp_erp_wait(adapter); 230 zfcp_device_unregister(&port->sysfs_device, &zfcp_sysfs_port_attrs);
173 zfcp_port_put(port);
174 zfcp_port_dequeue(port);
175 out: 231 out:
176 mutex_unlock(&zfcp_data.config_mutex); 232 zfcp_ccw_adapter_put(adapter);
177 return retval ? retval : (ssize_t) count; 233 return retval ? retval : (ssize_t) count;
178} 234}
179static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL, 235static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -202,16 +258,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
202 struct device_attribute *attr, 258 struct device_attribute *attr,
203 const char *buf, size_t count) 259 const char *buf, size_t count)
204{ 260{
205 struct zfcp_port *port = dev_get_drvdata(dev); 261 struct zfcp_port *port = container_of(dev, struct zfcp_port,
262 sysfs_device);
206 struct zfcp_unit *unit; 263 struct zfcp_unit *unit;
207 u64 fcp_lun; 264 u64 fcp_lun;
208 int retval = -EINVAL; 265 int retval = -EINVAL;
209 266
210 mutex_lock(&zfcp_data.config_mutex); 267 if (!(port && get_device(&port->sysfs_device)))
211 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 268 return -EBUSY;
212 retval = -EBUSY;
213 goto out;
214 }
215 269
216 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) 270 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
217 goto out; 271 goto out;
@@ -219,14 +273,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
219 unit = zfcp_unit_enqueue(port, fcp_lun); 273 unit = zfcp_unit_enqueue(port, fcp_lun);
220 if (IS_ERR(unit)) 274 if (IS_ERR(unit))
221 goto out; 275 goto out;
222 276 else
223 retval = 0; 277 retval = 0;
224 278
225 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); 279 zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
226 zfcp_erp_wait(unit->port->adapter); 280 zfcp_erp_wait(unit->port->adapter);
227 zfcp_unit_put(unit); 281 flush_work(&unit->scsi_work);
228out: 282out:
229 mutex_unlock(&zfcp_data.config_mutex); 283 put_device(&port->sysfs_device);
230 return retval ? retval : (ssize_t) count; 284 return retval ? retval : (ssize_t) count;
231} 285}
232static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store); 286static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
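
Both unit stores parse the LUN the same way the port stores parse a WWPN: strict_strtoull() with base 0 accepts the usual "0x..." notation and fails on trailing junk, which a plain sscanf would silently accept. A minimal wrapper in the spirit of the code above (helper name illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static int parse_u64_arg(const char *buf, u64 *val)
{
	/* strict_strtoull() is the pre-kstrtoull helper of this era */
	if (strict_strtoull(buf, 0, (unsigned long long *) val))
		return -EINVAL;		/* not a single clean number */
	return 0;
}
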
@@ -235,54 +289,37 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
235 struct device_attribute *attr, 289 struct device_attribute *attr,
236 const char *buf, size_t count) 290 const char *buf, size_t count)
237{ 291{
238 struct zfcp_port *port = dev_get_drvdata(dev); 292 struct zfcp_port *port = container_of(dev, struct zfcp_port,
293 sysfs_device);
239 struct zfcp_unit *unit; 294 struct zfcp_unit *unit;
240 u64 fcp_lun; 295 u64 fcp_lun;
241 int retval = 0; 296 int retval = -EINVAL;
242 LIST_HEAD(unit_remove_lh);
243 297
244 mutex_lock(&zfcp_data.config_mutex); 298 if (!(port && get_device(&port->sysfs_device)))
245 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) { 299 return -EBUSY;
246 retval = -EBUSY;
247 goto out;
248 }
249 300
250 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun)) { 301 if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
251 retval = -EINVAL;
252 goto out; 302 goto out;
253 }
254 303
255 write_lock_irq(&zfcp_data.config_lock);
256 unit = zfcp_get_unit_by_lun(port, fcp_lun); 304 unit = zfcp_get_unit_by_lun(port, fcp_lun);
257 if (unit) { 305 if (!unit)
258 write_unlock_irq(&zfcp_data.config_lock); 306 goto out;
259 /* wait for possible timeout during SCSI probe */ 307 else
260 flush_work(&unit->scsi_work); 308 retval = 0;
261 write_lock_irq(&zfcp_data.config_lock);
262
263 if (atomic_read(&unit->refcount) == 0) {
264 zfcp_unit_get(unit);
265 atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE,
266 &unit->status);
267 list_move(&unit->list, &unit_remove_lh);
268 } else {
269 unit = NULL;
270 }
271 }
272 309
273 write_unlock_irq(&zfcp_data.config_lock); 310 /* wait for possible timeout during SCSI probe */
311 flush_work(&unit->scsi_work);
274 312
275 if (!unit) { 313 write_lock_irq(&port->unit_list_lock);
276 retval = -ENXIO; 314 list_del(&unit->list);
277 goto out; 315 write_unlock_irq(&port->unit_list_lock);
278 } 316
317 put_device(&unit->sysfs_device);
279 318
280 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL); 319 zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
281 zfcp_erp_wait(unit->port->adapter); 320 zfcp_device_unregister(&unit->sysfs_device, &zfcp_sysfs_unit_attrs);
282 zfcp_unit_put(unit);
283 zfcp_unit_dequeue(unit);
284out: 321out:
285 mutex_unlock(&zfcp_data.config_mutex); 322 put_device(&port->sysfs_device);
286 return retval ? retval : (ssize_t) count; 323 return retval ? retval : (ssize_t) count;
287} 324}
288static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store); 325static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
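
port_remove and unit_remove now share one teardown shape: flush a possibly pending SCSI probe, unlink under the writer lock so lookups cannot find the object again, drop the lookup reference, then let ERP shutdown and zfcp_device_unregister() release the final reference. A sketch with generic names (the struct and helper are illustrative; the ordering is the point):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct obj {				/* stand-in for zfcp_port/zfcp_unit */
	struct list_head list;
	struct work_struct scsi_work;
	struct device sysfs_device;
};

static void remove_obj(struct obj *obj, rwlock_t *list_lock)
{
	flush_work(&obj->scsi_work);	/* wait out a pending SCSI probe */

	write_lock_irq(list_lock);
	list_del(&obj->list);		/* no new lookups can find it */
	write_unlock_irq(list_lock);

	put_device(&obj->sysfs_device);	/* drop the lookup reference; ERP
					 * shutdown plus the device
					 * unregister drop the last one */
}
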