Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/Kconfig | 18
-rw-r--r--  drivers/s390/block/Makefile | 6
-rw-r--r--  drivers/s390/block/dasd.c | 165
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c | 7
-rw-r--r--  drivers/s390/block/dasd_alias.c | 103
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 39
-rw-r--r--  drivers/s390/block/dasd_diag.c | 13
-rw-r--r--  drivers/s390/block/dasd_diag.h | 3
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 692
-rw-r--r--  drivers/s390/block/dasd_eckd.h | 3
-rw-r--r--  drivers/s390/block/dasd_eer.c | 2
-rw-r--r--  drivers/s390/block/dasd_erp.c | 3
-rw-r--r--  drivers/s390/block/dasd_fba.c | 25
-rw-r--r--  drivers/s390/block/dasd_fba.h | 3
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 3
-rw-r--r--  drivers/s390/block/dasd_int.h | 12
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 26
-rw-r--r--  drivers/s390/block/dasd_proc.c | 3
-rw-r--r--  drivers/s390/block/dcssblk.c | 59
-rw-r--r--  drivers/s390/block/scm_blk.c | 445
-rw-r--r--  drivers/s390/block/scm_blk.h | 117
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 228
-rw-r--r--  drivers/s390/block/scm_drv.c | 81
-rw-r--r--  drivers/s390/block/xpram.c | 7
-rw-r--r--  drivers/s390/char/con3215.c | 195
-rw-r--r--  drivers/s390/char/con3270.c | 1
-rw-r--r--  drivers/s390/char/ctrlchar.c | 3
-rw-r--r--  drivers/s390/char/ctrlchar.h | 3
-rw-r--r--  drivers/s390/char/fs3270.c | 2
-rw-r--r--  drivers/s390/char/keyboard.c | 33
-rw-r--r--  drivers/s390/char/keyboard.h | 17
-rw-r--r--  drivers/s390/char/monreader.c | 5
-rw-r--r--  drivers/s390/char/raw3270.c | 5
-rw-r--r--  drivers/s390/char/sclp.c | 20
-rw-r--r--  drivers/s390/char/sclp.h | 13
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 137
-rw-r--r--  drivers/s390/char/sclp_config.c | 10
-rw-r--r--  drivers/s390/char/sclp_cpi.c | 1
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.c | 2
-rw-r--r--  drivers/s390/char/sclp_cpi_sys.h | 1
-rw-r--r--  drivers/s390/char/sclp_ocf.c | 1
-rw-r--r--  drivers/s390/char/sclp_quiesce.c | 7
-rw-r--r--  drivers/s390/char/sclp_rw.c | 2
-rw-r--r--  drivers/s390/char/sclp_sdias.c | 107
-rw-r--r--  drivers/s390/char/sclp_tty.c | 40
-rw-r--r--  drivers/s390/char/sclp_tty.h | 3
-rw-r--r--  drivers/s390/char/sclp_vt220.c | 37
-rw-r--r--  drivers/s390/char/tape.h | 45
-rw-r--r--  drivers/s390/char/tape_34xx.c | 138
-rw-r--r--  drivers/s390/char/tape_3590.c | 107
-rw-r--r--  drivers/s390/char/tape_3590.h | 3
-rw-r--r--  drivers/s390/char/tape_char.c | 16
-rw-r--r--  drivers/s390/char/tape_class.c | 5
-rw-r--r--  drivers/s390/char/tape_class.h | 4
-rw-r--r--  drivers/s390/char/tape_core.c | 19
-rw-r--r--  drivers/s390/char/tape_proc.c | 3
-rw-r--r--  drivers/s390/char/tape_std.c | 3
-rw-r--r--  drivers/s390/char/tape_std.h | 7
-rw-r--r--  drivers/s390/char/tty3270.c | 161
-rw-r--r--  drivers/s390/char/tty3270.h | 2
-rw-r--r--  drivers/s390/char/vmcp.c | 4
-rw-r--r--  drivers/s390/char/vmcp.h | 2
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 47
-rw-r--r--  drivers/s390/char/vmur.c | 6
-rw-r--r--  drivers/s390/char/vmwatchdog.c | 6
-rw-r--r--  drivers/s390/char/zcore.c | 26
-rw-r--r--  drivers/s390/cio/Makefile | 2
-rw-r--r--  drivers/s390/cio/airq.c | 3
-rw-r--r--  drivers/s390/cio/blacklist.c | 7
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 417
-rw-r--r--  drivers/s390/cio/ccwreq.c | 23
-rw-r--r--  drivers/s390/cio/chp.c | 18
-rw-r--r--  drivers/s390/cio/chp.h | 4
-rw-r--r--  drivers/s390/cio/chsc.c | 214
-rw-r--r--  drivers/s390/cio/chsc.h | 43
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 6
-rw-r--r--  drivers/s390/cio/cio.c | 87
-rw-r--r--  drivers/s390/cio/cio.h | 5
-rw-r--r--  drivers/s390/cio/cmf.c | 8
-rw-r--r--  drivers/s390/cio/crw.c | 3
-rw-r--r--  drivers/s390/cio/css.c | 112
-rw-r--r--  drivers/s390/cio/css.h | 5
-rw-r--r--  drivers/s390/cio/device.c | 67
-rw-r--r--  drivers/s390/cio/device.h | 15
-rw-r--r--  drivers/s390/cio/device_fsm.c | 33
-rw-r--r--  drivers/s390/cio/device_id.c | 2
-rw-r--r--  drivers/s390/cio/device_ops.c | 37
-rw-r--r--  drivers/s390/cio/device_pgid.c | 12
-rw-r--r--  drivers/s390/cio/device_status.c | 5
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 401
-rw-r--r--  drivers/s390/cio/eadm_sch.h | 20
-rw-r--r--  drivers/s390/cio/idset.c | 27
-rw-r--r--  drivers/s390/cio/idset.h | 5
-rw-r--r--  drivers/s390/cio/io_sch.h | 7
-rw-r--r--  drivers/s390/cio/orb.h | 24
-rw-r--r--  drivers/s390/cio/qdio.h | 54
-rw-r--r--  drivers/s390/cio/qdio_debug.c | 19
-rw-r--r--  drivers/s390/cio/qdio_debug.h | 40
-rw-r--r--  drivers/s390/cio/qdio_main.c | 389
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 99
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 142
-rw-r--r--  drivers/s390/cio/scm.c | 317
-rw-r--r--  drivers/s390/crypto/Makefile | 13
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 272
-rw-r--r--  drivers/s390/crypto/ap_bus.h | 44
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 191
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 21
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c | 376
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 149
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.h | 12
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h | 59
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h | 17
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 517
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h | 41
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 856
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h | 169
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c | 13
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.c | 13
-rw-r--r--  drivers/s390/crypto/zcrypt_pcicc.h | 4
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c | 797
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.h | 5
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 35
-rw-r--r--  drivers/s390/net/Kconfig | 5
-rw-r--r--  drivers/s390/net/claw.c | 173
-rw-r--r--  drivers/s390/net/ctcm_dbug.c | 2
-rw-r--r--  drivers/s390/net/ctcm_dbug.h | 2
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 15
-rw-r--r--  drivers/s390/net/ctcm_fsms.h | 2
-rw-r--r--  drivers/s390/net/ctcm_main.c | 64
-rw-r--r--  drivers/s390/net/ctcm_main.h | 10
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 17
-rw-r--r--  drivers/s390/net/ctcm_mpc.h | 2
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c | 41
-rw-r--r--  drivers/s390/net/lcs.c | 98
-rw-r--r--  drivers/s390/net/netiucv.c | 253
-rw-r--r--  drivers/s390/net/qeth_core.h | 81
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 1218
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 3
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 26
-rw-r--r--  drivers/s390/net/qeth_core_sys.c | 51
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 78
-rw-r--r--  drivers/s390/net/qeth_l3.h | 6
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 304
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 226
-rw-r--r--  drivers/s390/net/smsgiucv.c | 2
-rw-r--r--  drivers/s390/net/smsgiucv.h | 2
-rw-r--r--  drivers/s390/net/smsgiucv_app.c | 9
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 83
-rw-r--r--  drivers/s390/scsi/zfcp_cfdc.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 59
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 11
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 4
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 7
-rw-r--r--  drivers/s390/scsi/zfcp_fc.c | 25
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 141
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 65
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 68
-rw-r--r--  drivers/s390/scsi/zfcp_reqlist.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 18
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_unit.c | 38
168 files changed, 4269 insertions, 8576 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 4a3b6232618..8e477bb1f3f 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -70,21 +70,3 @@ config DASD_EER
 	  This driver provides a character device interface to the
 	  DASD extended error reporting. This is only needed if you want to
 	  use applications written for the EER facility.
-
-config SCM_BLOCK
-	def_tristate m
-	prompt "Support for Storage Class Memory"
-	depends on S390 && BLOCK && EADM_SCH && SCM_BUS
-	help
-	  Block device driver for Storage Class Memory (SCM). This driver
-	  provides a block device interface for each available SCM increment.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called scm_block.
-
-config SCM_BLOCK_CLUSTER_WRITE
-	def_bool y
-	prompt "SCM force cluster writes"
-	depends on SCM_BLOCK
-	help
-	  Force writes to Storage Class Memory (SCM) to be in done in clusters.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index c2f4e673e03..0a89e080b38 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -17,9 +17,3 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
 obj-$(CONFIG_DASD_FBA)  += dasd_fba_mod.o
 obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
 obj-$(CONFIG_DCSSBLK) += dcssblk.o
-
-scm_block-objs := scm_drv.o scm_blk.o
-ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
-scm_block-objs += scm_blk_cluster.o
-endif
-obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 29225e1c159..a1d3ddba99c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1,4 +1,5 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Carsten Otte <Cotte@de.ibm.com>
@@ -10,12 +11,14 @@
 #define KMSG_COMPONENT "dasd"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kernel_stat.h>
 #include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ctype.h>
 #include <linux/major.h>
 #include <linux/slab.h>
+#include <linux/buffer_head.h>
 #include <linux/hdreg.h>
 #include <linux/async.h>
 #include <linux/mutex.h>
@@ -51,7 +54,7 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 
 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
-		   " Copyright IBM Corp. 2000");
+		   " Copyright 2000 IBM Corporation");
 MODULE_SUPPORTED_DEVICE("dasd");
 MODULE_LICENSE("GPL");
 
@@ -81,7 +84,6 @@ static void dasd_profile_exit(struct dasd_profile *);
 static wait_queue_head_t dasd_init_waitq;
 static wait_queue_head_t dasd_flush_wq;
 static wait_queue_head_t generic_waitq;
-static wait_queue_head_t shutdown_waitq;
 
 /*
  * Allocate memory for a new device structure.
@@ -349,16 +351,6 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
 	return rc;
 }
 
-static inline
-int _wait_for_empty_queues(struct dasd_device *device)
-{
-	if (device->block)
-		return list_empty(&device->ccw_queue) &&
-			list_empty(&device->block->ccw_queue);
-	else
-		return list_empty(&device->ccw_queue);
-}
-
 /*
  * Remove device from block device layer. Destroy dirty buffers.
  * Forget format information. Check if the target level is basic
@@ -544,11 +536,11 @@ static void dasd_change_state(struct dasd_device *device)
 	if (rc)
 		device->target = device->state;
 
-	/* let user-space know that the device status changed */
-	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
-
 	if (device->state == device->target)
 		wake_up(&dasd_init_waitq);
+
+	/* let user-space know that the device status changed */
+	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
 }
 
 /*
@@ -650,10 +642,6 @@ void dasd_enable_device(struct dasd_device *device)
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* Now wait for the devices to come up. */
 	wait_event(dasd_init_waitq, _wait_for_device(device));
-
-	dasd_reload_device(device);
-	if (device->discipline->kick_validate)
-		device->discipline->kick_validate(device);
 }
 
 /*
@@ -1086,7 +1074,7 @@ static const struct file_operations dasd_stats_global_fops = {
 static void dasd_profile_init(struct dasd_profile *profile,
 			      struct dentry *base_dentry)
 {
-	umode_t mode;
+	mode_t mode;
 	struct dentry *pde;
 
 	if (!base_dentry)
@@ -1125,7 +1113,7 @@ static void dasd_statistics_removeroot(void)
 
 static void dasd_statistics_createroot(void)
 {
-	umode_t mode;
+	mode_t mode;
 	struct dentry *pde;
 
 	dasd_debugfs_root_entry = NULL;
@@ -1606,6 +1594,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	unsigned long long now;
 	int expires;
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
@@ -1851,13 +1840,6 @@ static void __dasd_device_check_expire(struct dasd_device *device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
-		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
-			/*
-			 * IO in safe offline processing should not
-			 * run out of retries
-			 */
-			cqr->retries++;
-		}
 		if (device->discipline->term_IO(cqr) != 0) {
 			/* Hmpf, try again in 5 sec */
 			dev_err(&device->cdev->dev,
@@ -2011,8 +1993,6 @@ static void dasd_device_tasklet(struct dasd_device *device)
 	/* Now check if the head of the ccw queue needs to be started. */
 	__dasd_device_start_head(device);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	if (waitqueue_active(&shutdown_waitq))
-		wake_up(&shutdown_waitq);
 	dasd_put_device(device);
 }
 
@@ -2081,14 +2061,13 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
 /*
  * Wakeup helper for the 'sleep_on' functions.
  */
-void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
 {
 	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
 	cqr->callback_data = DASD_SLEEPON_END_TAG;
 	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
 	wake_up(&generic_waitq);
 }
-EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
 
 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 {
@@ -2174,7 +2153,6 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
 		    (!dasd_eer_enabled(device))) {
 			cqr->status = DASD_CQR_FAILED;
-			cqr->intrc = -EAGAIN;
 			continue;
 		}
 		/* Don't try to start requests if device is stopped */
@@ -2189,9 +2167,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
 		} else
 			wait_event(generic_waitq, !(device->stopped));
 
-		if (!cqr->callback)
-			cqr->callback = dasd_wakeup_cb;
-
+		cqr->callback = dasd_wakeup_cb;
 		cqr->callback_data = DASD_SLEEPON_START_TAG;
 		dasd_add_request_tail(cqr);
 		if (interruptible) {
@@ -2287,11 +2263,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = DASD_SLEEPON_START_TAG;
 	cqr->status = DASD_CQR_QUEUED;
-	/*
-	 * add new request as second
-	 * first the terminated cqr needs to be finished
-	 */
-	list_add(&cqr->devlist, device->ccw_queue.next);
+	list_add(&cqr->devlist, &device->ccw_queue);
 
 	/* let the bh start the request to keep them in order */
 	dasd_schedule_device_bh(device);
@@ -2652,8 +2624,6 @@ static void dasd_block_tasklet(struct dasd_block *block)
 	__dasd_block_start_head(block);
 	spin_unlock(&block->queue_lock);
 	spin_unlock_irq(&block->request_queue_lock);
-	if (waitqueue_active(&shutdown_waitq))
-		wake_up(&shutdown_waitq);
 	dasd_put_device(block->base);
 }
 
@@ -3041,11 +3011,11 @@ void dasd_generic_remove(struct ccw_device *cdev)
 
 	cdev->handler = NULL;
 
+	dasd_remove_sysfs_files(cdev);
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device))
 		return;
-	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
-	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
 		/* Already doing offline processing */
 		dasd_put_device(device);
 		return;
@@ -3065,8 +3035,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
 	 */
 	if (block)
 		dasd_free_block(block);
-
-	dasd_remove_sysfs_files(cdev);
 }
 
 /*
@@ -3145,13 +3113,16 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
 	struct dasd_block *block;
-	int max_count, open_count, rc;
+	int max_count, open_count;
 
-	rc = 0;
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device))
 		return PTR_ERR(device);
-
+	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+		/* Already doing offline processing */
+		dasd_put_device(device);
+		return 0;
+	}
 	/*
 	 * We must make sure that this device is currently not in use.
 	 * The open_count is increased for every opener, that includes
@@ -3175,54 +3146,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 			return -EBUSY;
 		}
 	}
-
-	if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
-		/*
-		 * safe offline allready running
-		 * could only be called by normal offline so safe_offline flag
-		 * needs to be removed to run normal offline and kill all I/O
-		 */
-		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
-			/* Already doing normal offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		} else
-			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-
-	} else
-		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
-			/* Already doing offline processing */
-			dasd_put_device(device);
-			return -EBUSY;
-		}
-
-	/*
-	 * if safe_offline called set safe_offline_running flag and
-	 * clear safe_offline so that a call to normal offline
-	 * can overrun safe_offline processing
-	 */
-	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
-	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
-		/*
-		 * If we want to set the device safe offline all IO operations
-		 * should be finished before continuing the offline process
-		 * so sync bdev first and then wait for our queues to become
-		 * empty
-		 */
-		/* sync blockdev and partitions */
-		rc = fsync_bdev(device->block->bdev);
-		if (rc != 0)
-			goto interrupted;
-
-		/* schedule device tasklet and wait for completion */
-		dasd_schedule_device_bh(device);
-		rc = wait_event_interruptible(shutdown_waitq,
-					      _wait_for_empty_queues(device));
-		if (rc != 0)
-			goto interrupted;
-	}
-
-	set_bit(DASD_FLAG_OFFLINE, &device->flags);
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
@@ -3234,14 +3157,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
 	if (block)
 		dasd_free_block(block);
 	return 0;
-
-interrupted:
-	/* interrupted by signal */
-	clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
-	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-	dasd_put_device(device);
-	return rc;
 }
 
 int dasd_generic_last_path_gone(struct dasd_device *device)
@@ -3342,22 +3257,6 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
 			device->path_data.tbvpm |= eventlpm;
 			dasd_schedule_device_bh(device);
 		}
-		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
-			if (!(device->path_data.opm & eventlpm) &&
-			    !(device->path_data.tbvpm & eventlpm)) {
-				/*
-				 * we can not establish a pathgroup on an
-				 * unavailable path, so trigger a path
-				 * verification first
-				 */
-				device->path_data.tbvpm |= eventlpm;
-				dasd_schedule_device_bh(device);
-			}
-			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
-				      "Pathgroup re-established\n");
-			if (device->discipline->kick_validate)
-				device->discipline->kick_validate(device);
-		}
 	}
 	dasd_put_device(device);
 }
@@ -3385,9 +3284,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
 	if (IS_ERR(device))
 		return PTR_ERR(device);
 
-	/* mark device as suspended */
-	set_bit(DASD_FLAG_SUSPENDED, &device->flags);
-
 	if (device->discipline->freeze)
 		rc = device->discipline->freeze(device);
 
@@ -3462,7 +3358,6 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
 	if (device->block)
 		dasd_schedule_block_bh(device->block);
 
-	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
 	dasd_put_device(device);
 	return 0;
 }
@@ -3561,23 +3456,6 @@ char *dasd_get_sense(struct irb *irb)
 }
 EXPORT_SYMBOL_GPL(dasd_get_sense);
 
-void dasd_generic_shutdown(struct ccw_device *cdev)
-{
-	struct dasd_device *device;
-
-	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device))
-		return;
-
-	if (device->block)
-		dasd_schedule_block_bh(device->block);
-
-	dasd_schedule_device_bh(device);
-
-	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
-}
-EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
-
 static int __init dasd_init(void)
 {
 	int rc;
@@ -3585,7 +3463,6 @@ static int __init dasd_init(void)
 	init_waitqueue_head(&dasd_init_waitq);
 	init_waitqueue_head(&dasd_flush_wq);
 	init_waitqueue_head(&generic_waitq);
-	init_waitqueue_head(&shutdown_waitq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index f8212d54013..87a0cf160fe 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1,8 +1,9 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd_3990_erp.c
  * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Copyright IBM Corp. 2000, 2001
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
  *
  */
 
@@ -1717,7 +1718,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
 	erp->startdev = device;
 	erp->memdev = device;
 	erp->magic = default_erp->magic;
-	erp->expires = default_erp->expires;
+	erp->expires = 0;
 	erp->retries = 256;
 	erp->buildclk = get_clock();
 	erp->status = DASD_CQR_FILLED;
@@ -2362,7 +2363,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
 	erp->memdev = device;
 	erp->block = cqr->block;
 	erp->magic = cqr->magic;
-	erp->expires = cqr->expires;
+	erp->expires = 0;
 	erp->retries = 256;
 	erp->buildclk = get_clock();
 	erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 6b556995bb3..c388eda1e2b 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -1,7 +1,7 @@
 /*
  * PAV alias management for the DASD ECKD discipline
  *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corporation, 2007
  * Author(s): Stefan Weinhuber <wein@de.ibm.com>
  */
 
@@ -189,12 +189,14 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 	unsigned long flags;
 	struct alias_server *server, *newserver;
 	struct alias_lcu *lcu, *newlcu;
+	int is_lcu_known;
 	struct dasd_uid uid;
 
 	private = (struct dasd_eckd_private *) device->private;
 
 	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&aliastree.lock, flags);
+	is_lcu_known = 1;
 	server = _find_server(&uid);
 	if (!server) {
 		spin_unlock_irqrestore(&aliastree.lock, flags);
@@ -206,6 +208,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 		if (!server) {
 			list_add(&newserver->server, &aliastree.serverlist);
 			server = newserver;
+			is_lcu_known = 0;
 		} else {
 			/* someone was faster */
 			_free_server(newserver);
@@ -223,10 +226,12 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 		if (!lcu) {
 			list_add(&newlcu->lcu, &server->lculist);
 			lcu = newlcu;
+			is_lcu_known = 0;
 		} else {
 			/* someone was faster */
 			_free_lcu(newlcu);
 		}
+		is_lcu_known = 0;
 	}
 	spin_lock(&lcu->lock);
 	list_add(&device->alias_list, &lcu->inactive_devices);
@@ -234,7 +239,64 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
 	spin_unlock(&lcu->lock);
 	spin_unlock_irqrestore(&aliastree.lock, flags);
 
-	return 0;
+	return is_lcu_known;
+}
+
+/*
+ * The first device to be registered on an LCU will have to do
+ * some additional setup steps to configure that LCU on the
+ * storage server. All further devices should wait with their
+ * initialization until the first device is done.
+ * To synchronize this work, the first device will call
+ * dasd_alias_lcu_setup_complete when it is done, and all
+ * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
+ */
+void dasd_alias_lcu_setup_complete(struct dasd_device *device)
+{
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid uid;
+
+	device->discipline->get_uid(device, &uid);
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(&uid);
+	if (server)
+		lcu = _find_lcu(server, &uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid.ssid, uid.real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	complete_all(&lcu->lcu_setup);
+}
+
+void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
+{
+	unsigned long flags;
+	struct alias_server *server;
+	struct alias_lcu *lcu;
+	struct dasd_uid uid;
+
+	device->discipline->get_uid(device, &uid);
+	lcu = NULL;
+	spin_lock_irqsave(&aliastree.lock, flags);
+	server = _find_server(&uid);
+	if (server)
+		lcu = _find_lcu(server, &uid);
+	spin_unlock_irqrestore(&aliastree.lock, flags);
+	if (!lcu) {
+		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
+				"could not find lcu for %04x %02x",
+				uid.ssid, uid.real_unit_addr);
+		WARN_ON(1);
+		return;
+	}
+	wait_for_completion(&lcu->lcu_setup);
 }
 
 /*
@@ -384,29 +446,6 @@ static void _remove_device_from_lcu(struct alias_lcu *lcu,
 	group->next = NULL;
 };
 
-static int
-suborder_not_supported(struct dasd_ccw_req *cqr)
-{
-	char *sense;
-	char reason;
-	char msg_format;
-	char msg_no;
-
-	sense = dasd_get_sense(&cqr->irb);
-	if (!sense)
-		return 0;
-
-	reason = sense[0];
-	msg_format = (sense[7] & 0xF0);
-	msg_no = (sense[7] & 0x0F);
-
-	/* command reject, Format 0 MSG 4 - invalid parameter */
-	if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
-		return 1;
-
-	return 0;
-}
-
 static int read_unit_address_configuration(struct dasd_device *device,
 					   struct alias_lcu *lcu)
 {
@@ -458,8 +497,6 @@ static int read_unit_address_configuration(struct dasd_device *device,
 
 	do {
 		rc = dasd_sleep_on(cqr);
-		if (rc && suborder_not_supported(cqr))
-			return -EOPNOTSUPP;
 	} while (rc && (cqr->retries > 0));
 	if (rc) {
 		spin_lock_irqsave(&lcu->lock, flags);
@@ -546,7 +583,7 @@ static void lcu_update_work(struct work_struct *work)
 	 * processing the data
 	 */
 	spin_lock_irqsave(&lcu->lock, flags);
-	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
 			      " alias data in lcu (rc = %d), retry later", rc);
 		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
@@ -668,16 +705,6 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
 	if (lcu->pav == NO_PAV ||
 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
 		return NULL;
-	if (unlikely(!(private->features.feature[8] & 0x01))) {
-		/*
-		 * PAV enabled but prefix not, very unlikely
-		 * seems to be a lost pathgroup
-		 * use base device to do IO
-		 */
-		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
-			      "Prefix not enabled with PAV enabled\n");
-		return NULL;
-	}
 
 	spin_lock_irqsave(&lcu->lock, flags);
 	alias_device = group->next;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c196827c228..d71511c7850 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1,10 +1,11 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd_devmap.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Carsten Otte <Cotte@de.ibm.com>
  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Copyright IBM Corp. 1999,2001
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
  *
  * Device mapping and dasd= parameter parsing functions. All devmap
  * functions may not be called from interrupt context. In particular
@@ -952,39 +953,6 @@ static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
 		   dasd_use_raw_store);
 
 static ssize_t
-dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
-			const char *buf, size_t count)
-{
-	struct ccw_device *cdev = to_ccwdev(dev);
-	struct dasd_device *device;
-	int rc;
-
-	device = dasd_device_from_cdev(cdev);
-	if (IS_ERR(device)) {
-		rc = PTR_ERR(device);
-		goto out;
-	}
-
-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
-	    test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
-		/* Already doing offline processing */
-		dasd_put_device(device);
-		rc = -EBUSY;
-		goto out;
-	}
-
-	set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
-	dasd_put_device(device);
-
-	rc = ccw_device_set_offline(cdev);
-
-out:
-	return rc ? rc : count;
-}
-
-static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
-
-static ssize_t
 dasd_discipline_show(struct device *dev, struct device_attribute *attr,
 		     char *buf)
 {
@@ -1353,7 +1321,6 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_expires.attr,
 	&dev_attr_reservation_policy.attr,
 	&dev_attr_last_known_reservation_state.attr,
-	&dev_attr_safe_offline.attr,
 	NULL,
 };
 
@@ -1378,7 +1345,7 @@ dasd_get_feature(struct ccw_device *cdev, int feature)
 
 /*
  * Set / reset given feature.
- * Flag indicates whether to set (!=0) or the reset (=0) the feature.
+ * Flag indicates wether to set (!=0) or the reset (=0) the feature.
  */
 int
 dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 704488d0f81..46784b83c5c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -1,9 +1,10 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd_diag.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.c
  * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Copyright IBM Corp. 1999, 2000
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
  */
 
@@ -228,7 +229,7 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
 }
 
 /* Handle external interruption. */
-static void dasd_ext_handler(struct ext_code ext_code,
+static void dasd_ext_handler(unsigned int ext_int_code,
 			     unsigned int param32, unsigned long param64)
 {
 	struct dasd_ccw_req *cqr, *next;
@@ -238,7 +239,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
 	addr_t ip;
 	int rc;
 
-	switch (ext_code.subcode >> 8) {
+	switch (ext_int_code >> 24) {
 	case DASD_DIAG_CODE_31BIT:
 		ip = (addr_t) param32;
 		break;
@@ -248,7 +249,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
 	default:
 		return;
 	}
-	inc_irq_stat(IRQEXT_DSD);
+	kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
 	if (!ip) {		/* no intparm: unsolicited interrupt */
 		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
 			  "interrupt");
@@ -279,7 +280,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
 	cqr->stopclk = get_clock();
 
 	expires = 0;
-	if ((ext_code.subcode & 0xff) == 0) {
+	if ((ext_int_code & 0xff0000) == 0) {
 		cqr->status = DASD_CQR_SUCCESS;
 		/* Start first request on queue if possible -> fast_io. */
 		if (!list_empty(&device->ccw_queue)) {
@@ -295,7 +296,7 @@ static void dasd_ext_handler(struct ext_code ext_code,
 		cqr->status = DASD_CQR_QUEUED;
 		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
 			      "request %p was %d (%d retries left)", cqr,
-			      ext_code.subcode & 0xff, cqr->retries);
+			      (ext_int_code >> 16) & 0xff, cqr->retries);
 		dasd_diag_erp(device);
 	}
 
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index a803cc73158..4f71fbe60c8 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -1,9 +1,10 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd_diag.h
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  * Based on.......: linux/drivers/s390/block/mdisk.h
  * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
  * Bugreports.to..: <Linux390@de.ibm.com>
- * Copyright IBM Corp. 1999, 2000
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
  *
  */
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e37bc1620d1..6e835c9fdfc 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1,4 +1,5 @@
 /*
+ * File...........: linux/drivers/s390/block/dasd_eckd.c
  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
  *		    Carsten Otte <Cotte@de.ibm.com>
@@ -17,13 +18,12 @@
 #include <linux/hdreg.h>	/* HDIO_GETGEO */
 #include <linux/bio.h>
 #include <linux/module.h>
-#include <linux/compat.h>
 #include <linux/init.h>
 
-#include <asm/css_chars.h>
 #include <asm/debug.h>
 #include <asm/idals.h>
 #include <asm/ebcdic.h>
+#include <asm/compat.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/cio.h>
@@ -32,6 +32,8 @@
 
 #include "dasd_int.h"
 #include "dasd_eckd.h"
+#include "../cio/chsc.h"
+
 
 #ifdef PRINTK_HEADER
 #undef PRINTK_HEADER
@@ -139,10 +141,6 @@ dasd_eckd_set_online(struct ccw_device *cdev)
 static const int sizes_trk0[] = { 28, 148, 84 };
 #define LABEL_SIZE 140
 
-/* head and record addresses of count_area read in analysis ccw */
-static const int count_area_head[] = { 0, 0, 0, 0, 2 };
-static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
-
 static inline unsigned int
 round_up_multiple(unsigned int no, unsigned int mult)
 {
@@ -215,7 +213,7 @@ check_XRC (struct ccw1 *de_ccw,
 
 	rc = get_sync_clock(&data->ep_sys_time);
 	/* Ignore return code if sync clock is switched off. */
-	if (rc == -EOPNOTSUPP || rc == -EACCES)
+	if (rc == -ENOSYS || rc == -EACCES)
 		rc = 0;
 
 	de_ccw->count = sizeof(struct DE_eckd_data);
@@ -326,7 +324,7 @@ static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
 
 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
 	/* Ignore return code if sync clock is switched off. */
-	if (rc == -EOPNOTSUPP || rc == -EACCES)
+	if (rc == -ENOSYS || rc == -EACCES)
 		rc = 0;
 	return rc;
 }
@@ -754,13 +752,24 @@ dasd_eckd_cdl_reclen(int recid)
 		return sizes_trk0[recid];
 	return LABEL_SIZE;
 }
-/* create unique id from private structure. */
-static void create_uid(struct dasd_eckd_private *private)
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
 {
-	int count;
+	struct dasd_eckd_private *private;
 	struct dasd_uid *uid;
+	int count;
+	unsigned long flags;
 
+	private = (struct dasd_eckd_private *) device->private;
+	if (!private)
+		return -ENODEV;
+	if (!private->ned || !private->gneq)
+		return -ENODEV;
 	uid = &private->uid;
+	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	memset(uid, 0, sizeof(struct dasd_uid));
 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
 	       sizeof(uid->vendor) - 1);
@@ -783,23 +792,6 @@ static void create_uid(struct dasd_eckd_private *private)
 				private->vdsneq->uit[count]);
 		}
 	}
-}
-
-/*
- * Generate device unique id that specifies the physical device.
- */
-static int dasd_eckd_generate_uid(struct dasd_device *device)
-{
-	struct dasd_eckd_private *private;
-	unsigned long flags;
-
-	private = (struct dasd_eckd_private *) device->private;
-	if (!private)
-		return -ENODEV;
-	if (!private->ned || !private->gneq)
-		return -ENODEV;
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	create_uid(private);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	return 0;
 }
@@ -819,21 +811,6 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
 	return -EINVAL;
 }
 
-/*
- * compare device UID with data of a given dasd_eckd_private structure
- * return 0 for match
- */
-static int dasd_eckd_compare_path_uid(struct dasd_device *device,
-				      struct dasd_eckd_private *private)
-{
-	struct dasd_uid device_uid;
-
-	create_uid(private);
-	dasd_eckd_get_uid(device, &device_uid);
-
-	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
-}
-
 static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
 				   struct dasd_ccw_req *cqr,
 				   __u8 *rcd_buffer,
@@ -867,30 +844,6 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
 }
 
-/*
- * Wakeup helper for read_conf
- * if the cqr is not done and needs some error recovery
- * the buffer has to be re-initialized with the EBCDIC "V1.0"
- * to show support for virtual device SNEQ
- */
-static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
-{
-	struct ccw1 *ccw;
-	__u8 *rcd_buffer;
-
-	if (cqr->status != DASD_CQR_DONE) {
-		ccw = cqr->cpaddr;
-		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
-		memset(rcd_buffer, 0, sizeof(*rcd_buffer));
-
-		rcd_buffer[0] = 0xE5;
-		rcd_buffer[1] = 0xF1;
-		rcd_buffer[2] = 0x4B;
-		rcd_buffer[3] = 0xF0;
-	}
-	dasd_wakeup_cb(cqr, data);
-}
-
 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
 					   struct dasd_ccw_req *cqr,
 					   __u8 *rcd_buffer,
@@ -910,7 +863,6 @@ static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 	cqr->retries = 5;
-	cqr->callback = read_conf_cb;
 	rc = dasd_sleep_on_immediatly(cqr);
 	return rc;
 }
@@ -948,7 +900,6 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
 		goto out_error;
 	}
 	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
-	cqr->callback = read_conf_cb;
 	ret = dasd_sleep_on(cqr);
 	/*
 	 * on success we update the user input parms
@@ -1026,125 +977,62 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
 {
 	void *conf_data;
 	int conf_len, conf_data_saved;
-	int rc, path_err;
+	int rc;
 	__u8 lpm, opm;
-	struct dasd_eckd_private *private, path_private;
+	struct dasd_eckd_private *private;
 	struct dasd_path *path_data;
-	struct dasd_uid *uid;
-	char print_path_uid[60], print_device_uid[60];
 
 	private = (struct dasd_eckd_private *) device->private;
 	path_data = &device->path_data;
 	opm = ccw_device_get_path_mask(device->cdev);
+	lpm = 0x80;
 	conf_data_saved = 0;
-	path_err = 0;
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm>>= 1) {
-		if (!(lpm & opm))
-			continue;
-		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
-					     &conf_len, lpm);
-		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
-			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
-					"Read configuration data returned "
-					"error %d", rc);
-			return rc;
-		}
-		if (conf_data == NULL) {
-			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
-					"No configuration data "
-					"retrieved");
-			/* no further analysis possible */
-			path_data->opm |= lpm;
-			continue;	/* no error */
-		}
-		/* save first valid configuration data */
-		if (!conf_data_saved) {
-			kfree(private->conf_data);
-			private->conf_data = conf_data;
-			private->conf_len = conf_len;
-			if (dasd_eckd_identify_conf_parts(private)) {
-				private->conf_data = NULL;
-				private->conf_len = 0;
-				kfree(conf_data);
-				continue;
-			}
-			/*
-			 * build device UID that other path data
-			 * can be compared to it
-			 */
-			dasd_eckd_generate_uid(device);
-			conf_data_saved++;
-		} else {
-			path_private.conf_data = conf_data;
-			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
-			if (dasd_eckd_identify_conf_parts(
-				&path_private)) {
-				path_private.conf_data = NULL;
-				path_private.conf_len = 0;
-				kfree(conf_data);
-				continue;
-			}
-
-			if (dasd_eckd_compare_path_uid(
-				device, &path_private)) {
-				uid = &path_private.uid;
-				if (strlen(uid->vduit) > 0)
-					snprintf(print_path_uid,
-						 sizeof(print_path_uid),
-						 "%s.%s.%04x.%02x.%s",
-						 uid->vendor, uid->serial,
-						 uid->ssid, uid->real_unit_addr,
-						 uid->vduit);
-				else
-					snprintf(print_path_uid,
-						 sizeof(print_path_uid),
-						 "%s.%s.%04x.%02x",
-						 uid->vendor, uid->serial,
-						 uid->ssid,
-						 uid->real_unit_addr);
-				uid = &private->uid;
-				if (strlen(uid->vduit) > 0)
-					snprintf(print_device_uid,
-						 sizeof(print_device_uid),
-						 "%s.%s.%04x.%02x.%s",
-						 uid->vendor, uid->serial,
-						 uid->ssid, uid->real_unit_addr,
-						 uid->vduit);
-				else
-					snprintf(print_device_uid,
-						 sizeof(print_device_uid),
-						 "%s.%s.%04x.%02x",
-						 uid->vendor, uid->serial,
-						 uid->ssid,
-						 uid->real_unit_addr);
-				dev_err(&device->cdev->dev,
-					"Not all channel paths lead to "
-					"the same device, path %02X leads to "
-					"device %s instead of %s\n", lpm,
-					print_path_uid, print_device_uid);
-				path_err = -EINVAL;
-				continue;
-			}
-
-			path_private.conf_data = NULL;
-			path_private.conf_len = 0;
-		}
-		switch (dasd_eckd_path_access(conf_data, conf_len)) {
-		case 0x02:
-			path_data->npm |= lpm;
-			break;
-		case 0x03:
-			path_data->ppm |= lpm;
-			break;
-		}
-		path_data->opm |= lpm;
-
-		if (conf_data != private->conf_data)
-			kfree(conf_data);
+		if (lpm & opm) {
+			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+						     &conf_len, lpm);
+			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+					"Read configuration data returned "
+					"error %d", rc);
+				return rc;
+			}
+			if (conf_data == NULL) {
+				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+						"No configuration data "
+						"retrieved");
+				/* no further analysis possible */
+				path_data->opm |= lpm;
+				continue;	/* no error */
+			}
+			/* save first valid configuration data */
+			if (!conf_data_saved) {
+				kfree(private->conf_data);
+				private->conf_data = conf_data;
+				private->conf_len = conf_len;
+				if (dasd_eckd_identify_conf_parts(private)) {
+					private->conf_data = NULL;
+					private->conf_len = 0;
+					kfree(conf_data);
+					continue;
+				}
+				conf_data_saved++;
+			}
+			switch (dasd_eckd_path_access(conf_data, conf_len)) {
+			case 0x02:
+				path_data->npm |= lpm;
+				break;
+			case 0x03:
+				path_data->ppm |= lpm;
+				break;
+			}
+			path_data->opm |= lpm;
+			if (conf_data != private->conf_data)
+				kfree(conf_data);
+		}
 	}
-
-	return path_err;
+	return 0;
 }
 
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
@@ -1176,200 +1064,80 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1176 return 0; 1064 return 0;
1177} 1065}
1178 1066
1179static int rebuild_device_uid(struct dasd_device *device,
1180 struct path_verification_work_data *data)
1181{
1182 struct dasd_eckd_private *private;
1183 struct dasd_path *path_data;
1184 __u8 lpm, opm;
1185 int rc;
1186
1187 rc = -ENODEV;
1188 private = (struct dasd_eckd_private *) device->private;
1189 path_data = &device->path_data;
1190 opm = device->path_data.opm;
1191
1192 for (lpm = 0x80; lpm; lpm >>= 1) {
1193 if (!(lpm & opm))
1194 continue;
1195 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1196 memset(&data->cqr, 0, sizeof(data->cqr));
1197 data->cqr.cpaddr = &data->ccw;
1198 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1199 data->rcd_buffer,
1200 lpm);
1201
1202 if (rc) {
1203 if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
1204 continue;
1205 DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1206 "Read configuration data "
1207 "returned error %d", rc);
1208 break;
1209 }
1210 memcpy(private->conf_data, data->rcd_buffer,
1211 DASD_ECKD_RCD_DATA_SIZE);
1212 if (dasd_eckd_identify_conf_parts(private)) {
1213 rc = -ENODEV;
1214 } else /* first valid path is enough */
1215 break;
1216 }
1217
1218 if (!rc)
1219 rc = dasd_eckd_generate_uid(device);
1220
1221 return rc;
1222}
1223
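rebuild_device_uid(), removed above, probes the operational paths one by one and stops at the first path whose configuration data parses, then regenerates the device UID from it. A sketch of that first-success scan under stubbed I/O: read_conf() and parse_conf() are hypothetical stand-ins for dasd_eckd_read_conf_immediately() and dasd_eckd_identify_conf_parts().

#include <errno.h>
#include <stdio.h>

/* Hypothetical stubs: read and parse configuration data for one path.
 * Here only path 0x20 delivers data; the others are tolerated misses. */
static int read_conf(unsigned char lpm) { return lpm == 0x20 ? 0 : -EOPNOTSUPP; }
static int parse_conf(void)             { return 0; }

static int rebuild_uid(unsigned char opm)
{
	unsigned char lpm;
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;		/* path not operational */
		rc = read_conf(lpm);
		if (rc) {
			if (rc == -EOPNOTSUPP)
				continue;	/* tolerated, try next path */
			break;			/* hard error, give up */
		}
		if (parse_conf())
			rc = -ENODEV;		/* data unusable */
		else
			break;	/* first valid path is enough */
	}
	return rc;	/* 0: UID can be regenerated from this path */
}

int main(void)
{
	printf("rc=%d\n", rebuild_uid(0xe0));
	return 0;
}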
1224static void do_path_verification_work(struct work_struct *work) 1067static void do_path_verification_work(struct work_struct *work)
1225{ 1068{
1226 struct path_verification_work_data *data; 1069 struct path_verification_work_data *data;
1227 struct dasd_device *device; 1070 struct dasd_device *device;
1228 struct dasd_eckd_private path_private;
1229 struct dasd_uid *uid;
1230 __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
1231 __u8 lpm, opm, npm, ppm, epm; 1071 __u8 lpm, opm, npm, ppm, epm;
1232 unsigned long flags; 1072 unsigned long flags;
1233 char print_uid[60];
1234 int rc; 1073 int rc;
1235 1074
1236 data = container_of(work, struct path_verification_work_data, worker); 1075 data = container_of(work, struct path_verification_work_data, worker);
1237 device = data->device; 1076 device = data->device;
1238 1077
1239 /* delay path verification until device was resumed */
1240 if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
1241 schedule_work(work);
1242 return;
1243 }
1244
1245 opm = 0; 1078 opm = 0;
1246 npm = 0; 1079 npm = 0;
1247 ppm = 0; 1080 ppm = 0;
1248 epm = 0; 1081 epm = 0;
1249 for (lpm = 0x80; lpm; lpm >>= 1) { 1082 for (lpm = 0x80; lpm; lpm >>= 1) {
1250 if (!(lpm & data->tbvpm)) 1083 if (lpm & data->tbvpm) {
1251 continue; 1084 memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
1252 memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer)); 1085 memset(&data->cqr, 0, sizeof(data->cqr));
1253 memset(&data->cqr, 0, sizeof(data->cqr)); 1086 data->cqr.cpaddr = &data->ccw;
1254 data->cqr.cpaddr = &data->ccw; 1087 rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
1255 rc = dasd_eckd_read_conf_immediately(device, &data->cqr, 1088 data->rcd_buffer,
1256 data->rcd_buffer, 1089 lpm);
1257 lpm); 1090 if (!rc) {
1258 if (!rc) { 1091 switch (dasd_eckd_path_access(data->rcd_buffer,
1259 switch (dasd_eckd_path_access(data->rcd_buffer, 1092 DASD_ECKD_RCD_DATA_SIZE)) {
1260 DASD_ECKD_RCD_DATA_SIZE) 1093 case 0x02:
1261 ) { 1094 npm |= lpm;
1262 case 0x02: 1095 break;
1263 npm |= lpm; 1096 case 0x03:
1264 break; 1097 ppm |= lpm;
1265 case 0x03: 1098 break;
1266 ppm |= lpm; 1099 }
1267 break; 1100 opm |= lpm;
1268 } 1101 } else if (rc == -EOPNOTSUPP) {
1269 opm |= lpm; 1102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1270 } else if (rc == -EOPNOTSUPP) { 1103 "path verification: No configuration "
1271 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 1104 "data retrieved");
1272 "path verification: No configuration " 1105 opm |= lpm;
1273 "data retrieved"); 1106 } else if (rc == -EAGAIN) {
1274 opm |= lpm; 1107 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1275 } else if (rc == -EAGAIN) {
1276 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1277 "path verification: device is stopped," 1108 "path verification: device is stopped,"
1278 " try again later"); 1109 " try again later");
1279 epm |= lpm; 1110 epm |= lpm;
1280 } else { 1111 } else {
1281 dev_warn(&device->cdev->dev, 1112 dev_warn(&device->cdev->dev,
1282 "Reading device feature codes failed " 1113 "Reading device feature codes failed "
1283 "(rc=%d) for new path %x\n", rc, lpm); 1114 "(rc=%d) for new path %x\n", rc, lpm);
1284 continue; 1115 continue;
1285 } 1116 }
1286 if (verify_fcx_max_data(device, lpm)) { 1117 if (verify_fcx_max_data(device, lpm)) {
1287 opm &= ~lpm;
1288 npm &= ~lpm;
1289 ppm &= ~lpm;
1290 continue;
1291 }
1292
1293 /*
1294 * save conf_data for comparison after
1295 * rebuild_device_uid may have changed
1296 * the original data
1297 */
1298 memcpy(&path_rcd_buf, data->rcd_buffer,
1299 DASD_ECKD_RCD_DATA_SIZE);
1300 path_private.conf_data = (void *) &path_rcd_buf;
1301 path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
1302 if (dasd_eckd_identify_conf_parts(&path_private)) {
1303 path_private.conf_data = NULL;
1304 path_private.conf_len = 0;
1305 continue;
1306 }
1307
1308 /*
1309 * compare path UID with device UID only if at least
1310 * one valid path is left
1311 * in other case the device UID may have changed and
1312 * the first working path UID will be used as device UID
1313 */
1314 if (device->path_data.opm &&
1315 dasd_eckd_compare_path_uid(device, &path_private)) {
1316 /*
1317 * the comparison was not successful
1318 * rebuild the device UID with at least one
1319 * known path in case a z/VM hyperswap command
1320 * has changed the device
1321 *
1322 * after this compare again
1323 *
1324 * if either the rebuild or the recompare fails
1325 * the path can not be used
1326 */
1327 if (rebuild_device_uid(device, data) ||
1328 dasd_eckd_compare_path_uid(
1329 device, &path_private)) {
1330 uid = &path_private.uid;
1331 if (strlen(uid->vduit) > 0)
1332 snprintf(print_uid, sizeof(print_uid),
1333 "%s.%s.%04x.%02x.%s",
1334 uid->vendor, uid->serial,
1335 uid->ssid, uid->real_unit_addr,
1336 uid->vduit);
1337 else
1338 snprintf(print_uid, sizeof(print_uid),
1339 "%s.%s.%04x.%02x",
1340 uid->vendor, uid->serial,
1341 uid->ssid,
1342 uid->real_unit_addr);
1343 dev_err(&device->cdev->dev,
1344 "The newly added channel path %02X "
1345 "will not be used because it leads "
1346 "to a different device %s\n",
1347 lpm, print_uid);
1348 opm &= ~lpm; 1118 opm &= ~lpm;
1349 npm &= ~lpm; 1119 npm &= ~lpm;
1350 ppm &= ~lpm; 1120 ppm &= ~lpm;
1351 continue;
1352 } 1121 }
1353 } 1122 }
1354
1355 /*
1356 * There is a small chance that a path is lost again between
1357 * above path verification and the following modification of
1358 * the device opm mask. We could avoid that race here by using
1359 * yet another path mask, but we rather deal with this unlikely
1360 * situation in dasd_start_IO.
1361 */
1362 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1363 if (!device->path_data.opm && opm) {
1364 device->path_data.opm = opm;
1365 dasd_generic_path_operational(device);
1366 } else
1367 device->path_data.opm |= opm;
1368 device->path_data.npm |= npm;
1369 device->path_data.ppm |= ppm;
1370 device->path_data.tbvpm |= epm;
1371 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1372 } 1123 }
1124 /*
1125 * There is a small chance that a path is lost again between
1126 * above path verification and the following modification of
1127 * the device opm mask. We could avoid that race here by using
1128 * yet another path mask, but we rather deal with this unlikely
1129 * situation in dasd_start_IO.
1130 */
1131 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1132 if (!device->path_data.opm && opm) {
1133 device->path_data.opm = opm;
1134 dasd_generic_path_operational(device);
1135 } else
1136 device->path_data.opm |= opm;
1137 device->path_data.npm |= npm;
1138 device->path_data.ppm |= ppm;
1139 device->path_data.tbvpm |= epm;
1140 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1373 1141
1374 dasd_put_device(device); 1142 dasd_put_device(device);
1375 if (data->isglobal) 1143 if (data->isglobal)
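The tail of do_path_verification_work() accumulates opm/npm/ppm/epm in locals and merges them into device->path_data only while holding the ccw-device lock; the first operational path additionally kicks dasd_generic_path_operational(). A user-space analog of that publish-under-lock step, with a pthread mutex standing in for the ccwdev lock and invented names throughout:

#include <pthread.h>
#include <stdio.h>

struct path_data {
	unsigned char opm, npm, ppm, tbvpm;
};

static struct path_data dev_paths;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static void path_operational(void)
{
	puts("device became operational, restart queued I/O");
}

/* Merge verification results; analog of the locked tail of
 * do_path_verification_work(). */
static void publish(unsigned char opm, unsigned char npm,
		    unsigned char ppm, unsigned char epm)
{
	pthread_mutex_lock(&dev_lock);
	if (!dev_paths.opm && opm) {
		/* first usable path: mark the device operational */
		dev_paths.opm = opm;
		path_operational();
	} else
		dev_paths.opm |= opm;
	dev_paths.npm |= npm;
	dev_paths.ppm |= ppm;
	dev_paths.tbvpm |= epm;		/* paths to verify again later */
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	publish(0x80, 0x00, 0x80, 0x40);
	printf("opm=%02x tbvpm=%02x\n", dev_paths.opm, dev_paths.tbvpm);
	return 0;
}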
@@ -1512,8 +1280,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1512 * call might change behaviour of DASD devices. 1280 * call might change behaviour of DASD devices.
1513 */ 1281 */
1514static int 1282static int
1515dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav, 1283dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1516 unsigned long flags)
1517{ 1284{
1518 struct dasd_ccw_req *cqr; 1285 struct dasd_ccw_req *cqr;
1519 int rc; 1286 int rc;
@@ -1522,19 +1289,10 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1522 if (IS_ERR(cqr)) 1289 if (IS_ERR(cqr))
1523 return PTR_ERR(cqr); 1290 return PTR_ERR(cqr);
1524 1291
1525 /*
1526 * set flags e.g. turn on failfast, to prevent blocking
1527 * the calling function should handle failed requests
1528 */
1529 cqr->flags |= flags;
1530
1531 rc = dasd_sleep_on(cqr); 1292 rc = dasd_sleep_on(cqr);
1532 if (!rc) 1293 if (!rc)
1533 /* trigger CIO to reprobe devices */ 1294 /* trigger CIO to reprobe devices */
1534 css_schedule_reprobe(); 1295 css_schedule_reprobe();
1535 else if (cqr->intrc == -EAGAIN)
1536 rc = -EAGAIN;
1537
1538 dasd_sfree_request(cqr, cqr->memdev); 1296 dasd_sfree_request(cqr, cqr->memdev);
1539 return rc; 1297 return rc;
1540} 1298}
@@ -1542,58 +1300,23 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1542/* 1300/*
1543 * Validate storage server of current device. 1301 * Validate storage server of current device.
1544 */ 1302 */
1545static int dasd_eckd_validate_server(struct dasd_device *device, 1303static void dasd_eckd_validate_server(struct dasd_device *device)
1546 unsigned long flags)
1547{ 1304{
1548 int rc; 1305 int rc;
1549 struct dasd_eckd_private *private; 1306 struct dasd_eckd_private *private;
1550 int enable_pav; 1307 int enable_pav;
1551 1308
1552 private = (struct dasd_eckd_private *) device->private;
1553 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1554 private->uid.type == UA_HYPER_PAV_ALIAS)
1555 return 0;
1556 if (dasd_nopav || MACHINE_IS_VM) 1309 if (dasd_nopav || MACHINE_IS_VM)
1557 enable_pav = 0; 1310 enable_pav = 0;
1558 else 1311 else
1559 enable_pav = 1; 1312 enable_pav = 1;
1560 rc = dasd_eckd_psf_ssc(device, enable_pav, flags); 1313 rc = dasd_eckd_psf_ssc(device, enable_pav);
1561 1314
1562 /* may be requested feature is not available on server, 1315 /* may be requested feature is not available on server,
1563 * therefore just report error and go ahead */ 1316 * therefore just report error and go ahead */
1317 private = (struct dasd_eckd_private *) device->private;
1564 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " 1318 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1565 "returned rc=%d", private->uid.ssid, rc); 1319 "returned rc=%d", private->uid.ssid, rc);
1566 return rc;
1567}
1568
1569/*
1570 * worker to do a validate server in case of a lost pathgroup
1571 */
1572static void dasd_eckd_do_validate_server(struct work_struct *work)
1573{
1574 struct dasd_device *device = container_of(work, struct dasd_device,
1575 kick_validate);
1576 if (dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST)
1577 == -EAGAIN) {
1578 /* schedule worker again if failed */
1579 schedule_work(&device->kick_validate);
1580 return;
1581 }
1582
1583 dasd_put_device(device);
1584}
1585
1586static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1587{
1588 dasd_get_device(device);
1589 /* exit if device not online or in offline processing */
1590 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1591 device->state < DASD_STATE_ONLINE) {
1592 dasd_put_device(device);
1593 return;
1594 }
1595 /* queue call to do_validate_server to the kernel event daemon. */
1596 schedule_work(&device->kick_validate);
1597} 1320}
1598 1321
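The removed dasd_eckd_kick_validate_server()/do_validate_server() pair pins the device with a reference before queueing work and lets the worker reschedule itself while the PSF-SSC request keeps returning -EAGAIN. A single-threaded sketch of that shape: schedule_work() is replaced by a direct call, validate_server() is a stub that succeeds on the third attempt, and a plain integer refcount stands in for dasd_get_device()/dasd_put_device().

#include <errno.h>
#include <stdio.h>

static int refcount;
static int attempts;

/* Hypothetical stand-in for dasd_eckd_validate_server(): fails with
 * -EAGAIN twice, then succeeds. */
static int validate_server(void)
{
	return ++attempts < 3 ? -EAGAIN : 0;
}

static void do_validate_work(void);

/* Analog of schedule_work(): here the item simply runs directly. */
static void schedule_validate(void)
{
	do_validate_work();
}

static void do_validate_work(void)
{
	if (validate_server() == -EAGAIN) {
		/* device not ready: keep the reference and retry */
		schedule_validate();
		return;
	}
	refcount--;	/* dasd_put_device() analog */
	printf("validated after %d attempts, refcount=%d\n",
	       attempts, refcount);
}

static void kick_validate(void)
{
	refcount++;	/* dasd_get_device(): pin device while queued */
	schedule_validate();
}

int main(void)
{
	kick_validate();
	return 0;
}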
1599static u32 get_fcx_max_data(struct dasd_device *device) 1322static u32 get_fcx_max_data(struct dasd_device *device)
@@ -1637,13 +1360,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1637 struct dasd_eckd_private *private; 1360 struct dasd_eckd_private *private;
1638 struct dasd_block *block; 1361 struct dasd_block *block;
1639 struct dasd_uid temp_uid; 1362 struct dasd_uid temp_uid;
1640 int rc, i; 1363 int is_known, rc, i;
1641 int readonly; 1364 int readonly;
1642 unsigned long value; 1365 unsigned long value;
1643 1366
1644 /* setup work queue for validate server*/
1645 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1646
1647 if (!ccw_device_is_pathgroup(device->cdev)) { 1367 if (!ccw_device_is_pathgroup(device->cdev)) {
1648 dev_warn(&device->cdev->dev, 1368 dev_warn(&device->cdev->dev,
1649 "A channel path group could not be established\n"); 1369 "A channel path group could not be established\n");
@@ -1689,6 +1409,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1689 device->default_expires = value; 1409 device->default_expires = value;
1690 } 1410 }
1691 1411
1412 /* Generate device unique id */
1413 rc = dasd_eckd_generate_uid(device);
1414 if (rc)
1415 goto out_err1;
1416
1692 dasd_eckd_get_uid(device, &temp_uid); 1417 dasd_eckd_get_uid(device, &temp_uid);
1693 if (temp_uid.type == UA_BASE_DEVICE) { 1418 if (temp_uid.type == UA_BASE_DEVICE) {
1694 block = dasd_alloc_block(); 1419 block = dasd_alloc_block();
@@ -1703,12 +1428,22 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
1703 block->base = device; 1428 block->base = device;
1704 } 1429 }
1705 1430
1706 /* register lcu with alias handling, enable PAV */ 1431 /* register lcu with alias handling, enable PAV if this is a new lcu */
1707 rc = dasd_alias_make_device_known_to_lcu(device); 1432 is_known = dasd_alias_make_device_known_to_lcu(device);
1708 if (rc) 1433 if (is_known < 0) {
1434 rc = is_known;
1709 goto out_err2; 1435 goto out_err2;
1710 1436 }
1711 dasd_eckd_validate_server(device, 0); 1437 /*
1438 * dasd_eckd_validate_server is done on the first device that
1439 * is found for an LCU. All later other devices have to wait
1440 * for it, so they will read the correct feature codes.
1441 */
1442 if (!is_known) {
1443 dasd_eckd_validate_server(device);
1444 dasd_alias_lcu_setup_complete(device);
1445 } else
1446 dasd_alias_wait_for_lcu_setup(device);
1712 1447
1713 /* device may report different configuration data after LCU setup */ 1448 /* device may report different configuration data after LCU setup */
1714 rc = dasd_eckd_read_conf(device); 1449 rc = dasd_eckd_read_conf(device);
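The replacement logic serializes LCU setup: only the first device registered for an LCU runs dasd_eckd_validate_server() and signals completion, while every later device blocks in dasd_alias_wait_for_lcu_setup() so it reads feature codes only after PAV has been negotiated. pthread_once() gives the same first-caller-initializes, everyone-else-waits guarantee in user space; a rough analog with invented device names:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t lcu_setup = PTHREAD_ONCE_INIT;

/* Analog of dasd_eckd_validate_server() + lcu_setup_complete(). */
static void validate_server_once(void)
{
	puts("validate server: enable PAV for this LCU");
}

static void *device_online(void *name)
{
	/* The first thread runs the setup; the others block until done. */
	pthread_once(&lcu_setup, validate_server_once);
	printf("%s: LCU setup complete, reading feature codes\n",
	       (char *)name);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, device_online, "0.0.4711");
	pthread_create(&b, NULL, device_online, "0.0.4712");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}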
@@ -1945,10 +1680,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1945 count_area = NULL; 1680 count_area = NULL;
1946 for (i = 0; i < 3; i++) { 1681 for (i = 0; i < 3; i++) {
1947 if (private->count_area[i].kl != 4 || 1682 if (private->count_area[i].kl != 4 ||
1948 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || 1683 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1949 private->count_area[i].cyl != 0 ||
1950 private->count_area[i].head != count_area_head[i] ||
1951 private->count_area[i].record != count_area_rec[i]) {
1952 private->uses_cdl = 0; 1684 private->uses_cdl = 0;
1953 break; 1685 break;
1954 } 1686 }
@@ -1960,10 +1692,7 @@ static int dasd_eckd_end_analysis(struct dasd_block *block)
1960 for (i = 0; i < 5; i++) { 1692 for (i = 0; i < 5; i++) {
1961 if ((private->count_area[i].kl != 0) || 1693 if ((private->count_area[i].kl != 0) ||
1962 (private->count_area[i].dl != 1694 (private->count_area[i].dl !=
1963 private->count_area[0].dl) || 1695 private->count_area[0].dl))
1964 private->count_area[i].cyl != 0 ||
1965 private->count_area[i].head != count_area_head[i] ||
1966 private->count_area[i].record != count_area_rec[i])
1967 break; 1696 break;
1968 } 1697 }
1969 if (i == 5) 1698 if (i == 5)
@@ -2027,7 +1756,6 @@ static int dasd_eckd_ready_to_online(struct dasd_device *device)
2027static int dasd_eckd_online_to_ready(struct dasd_device *device) 1756static int dasd_eckd_online_to_ready(struct dasd_device *device)
2028{ 1757{
2029 cancel_work_sync(&device->reload_device); 1758 cancel_work_sync(&device->reload_device);
2030 cancel_work_sync(&device->kick_validate);
2031 return dasd_alias_remove_device(device); 1759 return dasd_alias_remove_device(device);
2032}; 1760};
2033 1761
@@ -2293,14 +2021,9 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2293 /* first of all check for state change pending interrupt */ 2021 /* first of all check for state change pending interrupt */
2294 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 2022 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
2295 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 2023 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
2296 /* 2024 /* for alias only and not in offline processing*/
2297 * for alias only, not in offline processing
2298 * and only if not suspended
2299 */
2300 if (!device->block && private->lcu && 2025 if (!device->block && private->lcu &&
2301 device->state == DASD_STATE_ONLINE && 2026 !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2302 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2303 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
2304 /* 2027 /*
2305 * the state change could be caused by an alias 2028 * the state change could be caused by an alias
2306 * reassignment remove device from alias handling 2029 * reassignment remove device from alias handling
@@ -2447,7 +2170,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2447 sizeof(struct PFX_eckd_data)); 2170 sizeof(struct PFX_eckd_data));
2448 } else { 2171 } else {
2449 if (define_extent(ccw++, cqr->data, first_trk, 2172 if (define_extent(ccw++, cqr->data, first_trk,
2450 last_trk, cmd, basedev) == -EAGAIN) { 2173 last_trk, cmd, startdev) == -EAGAIN) {
2451 /* Clock not in sync and XRC is enabled. 2174 /* Clock not in sync and XRC is enabled.
2452 * Try again later. 2175 * Try again later.
2453 */ 2176 */
@@ -2627,7 +2350,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2627 new_track = 1; 2350 new_track = 1;
2628 end_idaw = 0; 2351 end_idaw = 0;
2629 len_to_track_end = 0; 2352 len_to_track_end = 0;
2630 idaw_dst = NULL; 2353 idaw_dst = 0;
2631 idaw_len = 0; 2354 idaw_len = 0;
2632 rq_for_each_segment(bv, req, iter) { 2355 rq_for_each_segment(bv, req, iter) {
2633 dst = page_address(bv->bv_page) + bv->bv_offset; 2356 dst = page_address(bv->bv_page) + bv->bv_offset;
@@ -2689,7 +2412,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2689 if (end_idaw) { 2412 if (end_idaw) {
2690 idaws = idal_create_words(idaws, idaw_dst, 2413 idaws = idal_create_words(idaws, idaw_dst,
2691 idaw_len); 2414 idaw_len);
2692 idaw_dst = NULL; 2415 idaw_dst = 0;
2693 idaw_len = 0; 2416 idaw_len = 0;
2694 end_idaw = 0; 2417 end_idaw = 0;
2695 } 2418 }
@@ -2872,7 +2595,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2872 sector_t recid, trkid; 2595 sector_t recid, trkid;
2873 unsigned int offs; 2596 unsigned int offs;
2874 unsigned int count, count_to_trk_end; 2597 unsigned int count, count_to_trk_end;
2875 int ret;
2876 2598
2877 basedev = block->base; 2599 basedev = block->base;
2878 if (rq_data_dir(req) == READ) { 2600 if (rq_data_dir(req) == READ) {
@@ -2913,8 +2635,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2913 2635
2914 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2636 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2915 if (IS_ERR(itcw)) { 2637 if (IS_ERR(itcw)) {
2916 ret = -EINVAL; 2638 dasd_sfree_request(cqr, startdev);
2917 goto out_error; 2639 return ERR_PTR(-EINVAL);
2918 } 2640 }
2919 cqr->cpaddr = itcw_get_tcw(itcw); 2641 cqr->cpaddr = itcw_get_tcw(itcw);
2920 if (prepare_itcw(itcw, first_trk, last_trk, 2642 if (prepare_itcw(itcw, first_trk, last_trk,
@@ -2926,8 +2648,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2926 /* Clock not in sync and XRC is enabled. 2648 /* Clock not in sync and XRC is enabled.
2927 * Try again later. 2649 * Try again later.
2928 */ 2650 */
2929 ret = -EAGAIN; 2651 dasd_sfree_request(cqr, startdev);
2930 goto out_error; 2652 return ERR_PTR(-EAGAIN);
2931 } 2653 }
2932 len_to_track_end = 0; 2654 len_to_track_end = 0;
2933 /* 2655 /*
@@ -2966,10 +2688,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2966 tidaw_flags = 0; 2688 tidaw_flags = 0;
2967 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 2689 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
2968 dst, part_len); 2690 dst, part_len);
2969 if (IS_ERR(last_tidaw)) { 2691 if (IS_ERR(last_tidaw))
2970 ret = -EINVAL; 2692 return ERR_PTR(-EINVAL);
2971 goto out_error;
2972 }
2973 dst += part_len; 2693 dst += part_len;
2974 } 2694 }
2975 } 2695 }
@@ -2978,10 +2698,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2978 dst = page_address(bv->bv_page) + bv->bv_offset; 2698 dst = page_address(bv->bv_page) + bv->bv_offset;
2979 last_tidaw = itcw_add_tidaw(itcw, 0x00, 2699 last_tidaw = itcw_add_tidaw(itcw, 0x00,
2980 dst, bv->bv_len); 2700 dst, bv->bv_len);
2981 if (IS_ERR(last_tidaw)) { 2701 if (IS_ERR(last_tidaw))
2982 ret = -EINVAL; 2702 return ERR_PTR(-EINVAL);
2983 goto out_error;
2984 }
2985 } 2703 }
2986 } 2704 }
2987 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2705 last_tidaw->flags |= TIDAW_FLAGS_LAST;
@@ -3001,9 +2719,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3001 cqr->buildclk = get_clock(); 2719 cqr->buildclk = get_clock();
3002 cqr->status = DASD_CQR_FILLED; 2720 cqr->status = DASD_CQR_FILLED;
3003 return cqr; 2721 return cqr;
3004out_error:
3005 dasd_sfree_request(cqr, startdev);
3006 return ERR_PTR(ret);
3007} 2722}
3008 2723
3009static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 2724static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
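The left side of the dasd_eckd_build_cp_tpm_track() hunks funnels every failure through a single out_error label instead of repeating the dasd_sfree_request()/return pair at each exit. A generic sketch of that centralized-cleanup idiom; struct request and fail_step are invented for the demo, and plain NULL plus a log line stands in for the kernel's ERR_PTR(ret):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int dummy; };

static struct request *build_request(int fail_step)
{
	struct request *req = malloc(sizeof(*req));
	int ret;

	if (!req)
		return NULL;

	if (fail_step == 1) {		/* e.g. itcw_init() failed */
		ret = -EINVAL;
		goto out_error;
	}
	if (fail_step == 2) {		/* e.g. clock not in sync */
		ret = -EAGAIN;
		goto out_error;
	}
	return req;			/* success path */

out_error:
	free(req);			/* one cleanup site for all errors */
	fprintf(stderr, "build failed: %d\n", ret);
	return NULL;
}

int main(void)
{
	free(build_request(0));		/* succeeds */
	build_request(2);		/* fails, cleans up after itself */
	return 0;
}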
@@ -3833,7 +3548,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
3833 case BIODASDSYMMIO: 3548 case BIODASDSYMMIO:
3834 return dasd_symm_io(device, argp); 3549 return dasd_symm_io(device, argp);
3835 default: 3550 default:
3836 return -ENOTTY; 3551 return -ENOIOCTLCMD;
3837 } 3552 }
3838} 3553}
3839 3554
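The ioctl hunk above switches the default case from -ENOIOCTLCMD to -ENOTTY: the latter is what user space expects for an unsupported ioctl ("inappropriate ioctl for device"), while -ENOIOCTLCMD is an in-kernel sentinel that should never reach a caller. A toy dispatcher showing the convention; the command numbers are made up:

#include <errno.h>
#include <stdio.h>

#define BIODASDINFO_DEMO  0x1001	/* hypothetical command numbers */
#define BIODASDRESET_DEMO 0x1002

static int dasd_ioctl_demo(unsigned int cmd)
{
	switch (cmd) {
	case BIODASDINFO_DEMO:
		return 0;		/* handled */
	case BIODASDRESET_DEMO:
		return 0;
	default:
		/* unknown command: the caller sees ENOTTY */
		return -ENOTTY;
	}
}

int main(void)
{
	printf("known: %d, unknown: %d\n",
	       dasd_ioctl_demo(BIODASDINFO_DEMO), dasd_ioctl_demo(0xdead));
	return 0;
}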
@@ -3849,7 +3564,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
3849 3564
3850 len = 0; 3565 len = 0;
3851 while (from <= to) { 3566 while (from <= to) {
3852 len += sprintf(page + len, PRINTK_HEADER 3567 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3853 " CCW %p: %08X %08X DAT:", 3568 " CCW %p: %08X %08X DAT:",
3854 from, ((int *) from)[0], ((int *) from)[1]); 3569 from, ((int *) from)[0], ((int *) from)[1]);
3855 3570
@@ -3910,23 +3625,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3910 return; 3625 return;
3911 } 3626 }
3912 /* dump the sense data */ 3627 /* dump the sense data */
3913 len = sprintf(page, PRINTK_HEADER 3628 len = sprintf(page, KERN_ERR PRINTK_HEADER
3914 " I/O status report for device %s:\n", 3629 " I/O status report for device %s:\n",
3915 dev_name(&device->cdev->dev)); 3630 dev_name(&device->cdev->dev));
3916 len += sprintf(page + len, PRINTK_HEADER 3631 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3917 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 3632 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3918 "CS:%02X RC:%d\n", 3633 "CS:%02X RC:%d\n",
3919 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 3634 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3920 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 3635 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3921 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 3636 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3922 req ? req->intrc : 0); 3637 req ? req->intrc : 0);
3923 len += sprintf(page + len, PRINTK_HEADER 3638 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3924 " device %s: Failing CCW: %p\n", 3639 " device %s: Failing CCW: %p\n",
3925 dev_name(&device->cdev->dev), 3640 dev_name(&device->cdev->dev),
3926 (void *) (addr_t) irb->scsw.cmd.cpa); 3641 (void *) (addr_t) irb->scsw.cmd.cpa);
3927 if (irb->esw.esw0.erw.cons) { 3642 if (irb->esw.esw0.erw.cons) {
3928 for (sl = 0; sl < 4; sl++) { 3643 for (sl = 0; sl < 4; sl++) {
3929 len += sprintf(page + len, PRINTK_HEADER 3644 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3930 " Sense(hex) %2d-%2d:", 3645 " Sense(hex) %2d-%2d:",
3931 (8 * sl), ((8 * sl) + 7)); 3646 (8 * sl), ((8 * sl) + 7));
3932 3647
@@ -3939,23 +3654,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3939 3654
3940 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 3655 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3941 /* 24 Byte Sense Data */ 3656 /* 24 Byte Sense Data */
3942 sprintf(page + len, PRINTK_HEADER 3657 sprintf(page + len, KERN_ERR PRINTK_HEADER
3943 " 24 Byte: %x MSG %x, " 3658 " 24 Byte: %x MSG %x, "
3944 "%s MSGb to SYSOP\n", 3659 "%s MSGb to SYSOP\n",
3945 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 3660 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3946 irb->ecw[1] & 0x10 ? "" : "no"); 3661 irb->ecw[1] & 0x10 ? "" : "no");
3947 } else { 3662 } else {
3948 /* 32 Byte Sense Data */ 3663 /* 32 Byte Sense Data */
3949 sprintf(page + len, PRINTK_HEADER 3664 sprintf(page + len, KERN_ERR PRINTK_HEADER
3950 " 32 Byte: Format: %x " 3665 " 32 Byte: Format: %x "
3951 "Exception class %x\n", 3666 "Exception class %x\n",
3952 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 3667 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3953 } 3668 }
3954 } else { 3669 } else {
3955 sprintf(page + len, PRINTK_HEADER 3670 sprintf(page + len, KERN_ERR PRINTK_HEADER
3956 " SORRY - NO VALID SENSE AVAILABLE\n"); 3671 " SORRY - NO VALID SENSE AVAILABLE\n");
3957 } 3672 }
3958 printk(KERN_ERR "%s", page); 3673 printk("%s", page);
3959 3674
3960 if (req) { 3675 if (req) {
3961 /* req == NULL for unsolicited interrupts */ 3676 /* req == NULL for unsolicited interrupts */
@@ -3964,10 +3679,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3964 first = req->cpaddr; 3679 first = req->cpaddr;
3965 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 3680 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3966 to = min(first + 6, last); 3681 to = min(first + 6, last);
3967 len = sprintf(page, PRINTK_HEADER 3682 len = sprintf(page, KERN_ERR PRINTK_HEADER
3968 " Related CP in req: %p\n", req); 3683 " Related CP in req: %p\n", req);
3969 dasd_eckd_dump_ccw_range(first, to, page + len); 3684 dasd_eckd_dump_ccw_range(first, to, page + len);
3970 printk(KERN_ERR "%s", page); 3685 printk("%s", page);
3971 3686
3972 /* print failing CCW area (maximum 4) */ 3687 /* print failing CCW area (maximum 4) */
3973 /* scsw->cda is either valid or zero */ 3688 /* scsw->cda is either valid or zero */
@@ -3977,7 +3692,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3977 irb->scsw.cmd.cpa; /* failing CCW */ 3692 irb->scsw.cmd.cpa; /* failing CCW */
3978 if (from < fail - 2) { 3693 if (from < fail - 2) {
3979 from = fail - 2; /* there is a gap - print header */ 3694 from = fail - 2; /* there is a gap - print header */
3980 len += sprintf(page, PRINTK_HEADER "......\n"); 3695 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3981 } 3696 }
3982 to = min(fail + 1, last); 3697 to = min(fail + 1, last);
3983 len += dasd_eckd_dump_ccw_range(from, to, page + len); 3698 len += dasd_eckd_dump_ccw_range(from, to, page + len);
@@ -3986,11 +3701,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3986 from = max(from, ++to); 3701 from = max(from, ++to);
3987 if (from < last - 1) { 3702 if (from < last - 1) {
3988 from = last - 1; /* there is a gap - print header */ 3703 from = last - 1; /* there is a gap - print header */
3989 len += sprintf(page + len, PRINTK_HEADER "......\n"); 3704 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3990 } 3705 }
3991 len += dasd_eckd_dump_ccw_range(from, last, page + len); 3706 len += dasd_eckd_dump_ccw_range(from, last, page + len);
3992 if (len > 0) 3707 if (len > 0)
3993 printk(KERN_ERR "%s", page); 3708 printk("%s", page);
3994 } 3709 }
3995 free_page((unsigned long) page); 3710 free_page((unsigned long) page);
3996} 3711}
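The dump_sense hunks drop the per-line KERN_ERR prefixes: the report is assembled into one page-sized buffer by repeated len += sprintf() calls and emitted with a single printk, so the log level belongs once at the head of the message rather than embedded mid-buffer. A user-space analog of the accumulate-then-emit pattern; the PRINTK_HEADER value and the device name are placeholders:

#include <stdio.h>

#define PAGE_SIZE 4096
#define PRINTK_HEADER "dasd(eckd):"

int main(void)
{
	char page[PAGE_SIZE];
	int len = 0;

	/* Build the whole report first ... */
	len += snprintf(page + len, sizeof(page) - len,
			PRINTK_HEADER " I/O status report for device %s:\n",
			"0.0.4711");
	len += snprintf(page + len, sizeof(page) - len,
			PRINTK_HEADER " in req: %p CS:%02X DS:%02X\n",
			(void *)0, 0x00, 0x0e);

	/* ... then emit it in one call, one log level for everything. */
	fputs(page, stderr);
	return 0;
}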
@@ -4014,10 +3729,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4014 return; 3729 return;
4015 } 3730 }
4016 /* dump the sense data */ 3731 /* dump the sense data */
4017 len = sprintf(page, PRINTK_HEADER 3732 len = sprintf(page, KERN_ERR PRINTK_HEADER
4018 " I/O status report for device %s:\n", 3733 " I/O status report for device %s:\n",
4019 dev_name(&device->cdev->dev)); 3734 dev_name(&device->cdev->dev));
4020 len += sprintf(page + len, PRINTK_HEADER 3735 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4021 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 3736 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
4022 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 3737 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
4023 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 3738 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
@@ -4025,7 +3740,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4025 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 3740 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
4026 irb->scsw.tm.fcxs, irb->scsw.tm.schxs, 3741 irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
4027 req ? req->intrc : 0); 3742 req ? req->intrc : 0);
4028 len += sprintf(page + len, PRINTK_HEADER 3743 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4029 " device %s: Failing TCW: %p\n", 3744 " device %s: Failing TCW: %p\n",
4030 dev_name(&device->cdev->dev), 3745 dev_name(&device->cdev->dev),
4031 (void *) (addr_t) irb->scsw.tm.tcw); 3746 (void *) (addr_t) irb->scsw.tm.tcw);
@@ -4037,42 +3752,43 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4037 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 3752 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
4038 3753
4039 if (tsb) { 3754 if (tsb) {
4040 len += sprintf(page + len, PRINTK_HEADER 3755 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4041 " tsb->length %d\n", tsb->length); 3756 " tsb->length %d\n", tsb->length);
4042 len += sprintf(page + len, PRINTK_HEADER 3757 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4043 " tsb->flags %x\n", tsb->flags); 3758 " tsb->flags %x\n", tsb->flags);
4044 len += sprintf(page + len, PRINTK_HEADER 3759 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4045 " tsb->dcw_offset %d\n", tsb->dcw_offset); 3760 " tsb->dcw_offset %d\n", tsb->dcw_offset);
4046 len += sprintf(page + len, PRINTK_HEADER 3761 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4047 " tsb->count %d\n", tsb->count); 3762 " tsb->count %d\n", tsb->count);
4048 residual = tsb->count - 28; 3763 residual = tsb->count - 28;
4049 len += sprintf(page + len, PRINTK_HEADER 3764 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4050 " residual %d\n", residual); 3765 " residual %d\n", residual);
4051 3766
4052 switch (tsb->flags & 0x07) { 3767 switch (tsb->flags & 0x07) {
4053 case 1: /* tsa_iostat */ 3768 case 1: /* tsa_iostat */
4054 len += sprintf(page + len, PRINTK_HEADER 3769 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4055 " tsb->tsa.iostat.dev_time %d\n", 3770 " tsb->tsa.iostat.dev_time %d\n",
4056 tsb->tsa.iostat.dev_time); 3771 tsb->tsa.iostat.dev_time);
4057 len += sprintf(page + len, PRINTK_HEADER 3772 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4058 " tsb->tsa.iostat.def_time %d\n", 3773 " tsb->tsa.iostat.def_time %d\n",
4059 tsb->tsa.iostat.def_time); 3774 tsb->tsa.iostat.def_time);
4060 len += sprintf(page + len, PRINTK_HEADER 3775 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4061 " tsb->tsa.iostat.queue_time %d\n", 3776 " tsb->tsa.iostat.queue_time %d\n",
4062 tsb->tsa.iostat.queue_time); 3777 tsb->tsa.iostat.queue_time);
4063 len += sprintf(page + len, PRINTK_HEADER 3778 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4064 " tsb->tsa.iostat.dev_busy_time %d\n", 3779 " tsb->tsa.iostat.dev_busy_time %d\n",
4065 tsb->tsa.iostat.dev_busy_time); 3780 tsb->tsa.iostat.dev_busy_time);
4066 len += sprintf(page + len, PRINTK_HEADER 3781 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4067 " tsb->tsa.iostat.dev_act_time %d\n", 3782 " tsb->tsa.iostat.dev_act_time %d\n",
4068 tsb->tsa.iostat.dev_act_time); 3783 tsb->tsa.iostat.dev_act_time);
4069 sense = tsb->tsa.iostat.sense; 3784 sense = tsb->tsa.iostat.sense;
4070 break; 3785 break;
4071 case 2: /* ts_ddpc */ 3786 case 2: /* ts_ddpc */
4072 len += sprintf(page + len, PRINTK_HEADER 3787 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4073 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3788 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
4074 for (sl = 0; sl < 2; sl++) { 3789 for (sl = 0; sl < 2; sl++) {
4075 len += sprintf(page + len, PRINTK_HEADER 3790 len += sprintf(page + len,
3791 KERN_ERR PRINTK_HEADER
4076 " tsb->tsa.ddpc.rcq %2d-%2d: ", 3792 " tsb->tsa.ddpc.rcq %2d-%2d: ",
4077 (8 * sl), ((8 * sl) + 7)); 3793 (8 * sl), ((8 * sl) + 7));
4078 rcq = tsb->tsa.ddpc.rcq; 3794 rcq = tsb->tsa.ddpc.rcq;
@@ -4085,14 +3801,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4085 sense = tsb->tsa.ddpc.sense; 3801 sense = tsb->tsa.ddpc.sense;
4086 break; 3802 break;
4087 case 3: /* tsa_intrg */ 3803 case 3: /* tsa_intrg */
4088 len += sprintf(page + len, PRINTK_HEADER 3804 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
4089 " tsb->tsa.intrg.: not supportet yet\n"); 3805 " tsb->tsa.intrg.: not supportet yet \n");
4090 break; 3806 break;
4091 } 3807 }
4092 3808
4093 if (sense) { 3809 if (sense) {
4094 for (sl = 0; sl < 4; sl++) { 3810 for (sl = 0; sl < 4; sl++) {
4095 len += sprintf(page + len, PRINTK_HEADER 3811 len += sprintf(page + len,
3812 KERN_ERR PRINTK_HEADER
4096 " Sense(hex) %2d-%2d:", 3813 " Sense(hex) %2d-%2d:",
4097 (8 * sl), ((8 * sl) + 7)); 3814 (8 * sl), ((8 * sl) + 7));
4098 for (sct = 0; sct < 8; sct++) { 3815 for (sct = 0; sct < 8; sct++) {
@@ -4104,27 +3821,27 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4104 3821
4105 if (sense[27] & DASD_SENSE_BIT_0) { 3822 if (sense[27] & DASD_SENSE_BIT_0) {
4106 /* 24 Byte Sense Data */ 3823 /* 24 Byte Sense Data */
4107 sprintf(page + len, PRINTK_HEADER 3824 sprintf(page + len, KERN_ERR PRINTK_HEADER
4108 " 24 Byte: %x MSG %x, " 3825 " 24 Byte: %x MSG %x, "
4109 "%s MSGb to SYSOP\n", 3826 "%s MSGb to SYSOP\n",
4110 sense[7] >> 4, sense[7] & 0x0f, 3827 sense[7] >> 4, sense[7] & 0x0f,
4111 sense[1] & 0x10 ? "" : "no"); 3828 sense[1] & 0x10 ? "" : "no");
4112 } else { 3829 } else {
4113 /* 32 Byte Sense Data */ 3830 /* 32 Byte Sense Data */
4114 sprintf(page + len, PRINTK_HEADER 3831 sprintf(page + len, KERN_ERR PRINTK_HEADER
4115 " 32 Byte: Format: %x " 3832 " 32 Byte: Format: %x "
4116 "Exception class %x\n", 3833 "Exception class %x\n",
4117 sense[6] & 0x0f, sense[22] >> 4); 3834 sense[6] & 0x0f, sense[22] >> 4);
4118 } 3835 }
4119 } else { 3836 } else {
4120 sprintf(page + len, PRINTK_HEADER 3837 sprintf(page + len, KERN_ERR PRINTK_HEADER
4121 " SORRY - NO VALID SENSE AVAILABLE\n"); 3838 " SORRY - NO VALID SENSE AVAILABLE\n");
4122 } 3839 }
4123 } else { 3840 } else {
4124 sprintf(page + len, PRINTK_HEADER 3841 sprintf(page + len, KERN_ERR PRINTK_HEADER
4125 " SORRY - NO TSB DATA AVAILABLE\n"); 3842 " SORRY - NO TSB DATA AVAILABLE\n");
4126 } 3843 }
4127 printk(KERN_ERR "%s", page); 3844 printk("%s", page);
4128 free_page((unsigned long) page); 3845 free_page((unsigned long) page);
4129} 3846}
4130 3847
@@ -4154,14 +3871,16 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4154{ 3871{
4155 struct dasd_eckd_private *private; 3872 struct dasd_eckd_private *private;
4156 struct dasd_eckd_characteristics temp_rdc_data; 3873 struct dasd_eckd_characteristics temp_rdc_data;
4157 int rc; 3874 int is_known, rc;
4158 struct dasd_uid temp_uid; 3875 struct dasd_uid temp_uid;
4159 unsigned long flags; 3876 unsigned long flags;
4160 3877
4161 private = (struct dasd_eckd_private *) device->private; 3878 private = (struct dasd_eckd_private *) device->private;
4162 3879
4163 /* Read Configuration Data */ 3880 /* Read Configuration Data */
4164 dasd_eckd_read_conf(device); 3881 rc = dasd_eckd_read_conf(device);
3882 if (rc)
3883 goto out_err;
4165 3884
4166 dasd_eckd_get_uid(device, &temp_uid); 3885 dasd_eckd_get_uid(device, &temp_uid);
4167 /* Generate device unique id */ 3886 /* Generate device unique id */
@@ -4175,13 +3894,19 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4175 goto out_err; 3894 goto out_err;
4176 3895
4177 /* register lcu with alias handling, enable PAV if this is a new lcu */ 3896 /* register lcu with alias handling, enable PAV if this is a new lcu */
4178 rc = dasd_alias_make_device_known_to_lcu(device); 3897 is_known = dasd_alias_make_device_known_to_lcu(device);
4179 if (rc) 3898 if (is_known < 0)
4180 return rc; 3899 return is_known;
4181 dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST); 3900 if (!is_known) {
3901 dasd_eckd_validate_server(device);
3902 dasd_alias_lcu_setup_complete(device);
3903 } else
3904 dasd_alias_wait_for_lcu_setup(device);
4182 3905
4183 /* RE-Read Configuration Data */ 3906 /* RE-Read Configuration Data */
4184 dasd_eckd_read_conf(device); 3907 rc = dasd_eckd_read_conf(device);
3908 if (rc)
3909 goto out_err;
4185 3910
4186 /* Read Feature Codes */ 3911 /* Read Feature Codes */
4187 dasd_eckd_read_features(device); 3912 dasd_eckd_read_features(device);
@@ -4269,12 +3994,10 @@ static struct ccw_driver dasd_eckd_driver = {
4269 .set_online = dasd_eckd_set_online, 3994 .set_online = dasd_eckd_set_online,
4270 .notify = dasd_generic_notify, 3995 .notify = dasd_generic_notify,
4271 .path_event = dasd_generic_path_event, 3996 .path_event = dasd_generic_path_event,
4272 .shutdown = dasd_generic_shutdown,
4273 .freeze = dasd_generic_pm_freeze, 3997 .freeze = dasd_generic_pm_freeze,
4274 .thaw = dasd_generic_restore_device, 3998 .thaw = dasd_generic_restore_device,
4275 .restore = dasd_generic_restore_device, 3999 .restore = dasd_generic_restore_device,
4276 .uc_handler = dasd_generic_uc_handler, 4000 .uc_handler = dasd_generic_uc_handler,
4277 .int_class = IRQIO_DAS,
4278}; 4001};
4279 4002
4280/* 4003/*
@@ -4319,7 +4042,6 @@ static struct dasd_discipline dasd_eckd_discipline = {
4319 .restore = dasd_eckd_restore_device, 4042 .restore = dasd_eckd_restore_device,
4320 .reload = dasd_eckd_reload_device, 4043 .reload = dasd_eckd_reload_device,
4321 .get_uid = dasd_eckd_get_uid, 4044 .get_uid = dasd_eckd_get_uid,
4322 .kick_validate = dasd_eckd_kick_validate_server,
4323}; 4045};
4324 4046
4325static int __init 4047static int __init
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 2555e494591..4a688a873a7 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -1,8 +1,9 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_eckd.h
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com> 5 * Bugreports.to..: <Linux390@de.ibm.com>
5 * Copyright IBM Corp. 1999, 2000 6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 * 7 *
7 */ 8 */
8 9
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index ff901b5509c..16c5208c3dc 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Character device driver for extended error reporting. 2 * Character device driver for extended error reporting.
3 * 3 *
4 * Copyright IBM Corp. 2005 4 * Copyright (C) 2005 IBM Corporation
5 * extended error reporting for DASD ECKD devices 5 * extended error reporting for DASD ECKD devices
6 * Author(s): Stefan Weinhuber <wein@de.ibm.com> 6 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */ 7 */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index d01ef82f875..0eafe2e421e 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd.c
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Carsten Otte <Cotte@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
7 * Copyright IBM Corp. 1999, 2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
8 * 9 *
9 */ 10 */
10 11
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 41469858434..4b71b116486 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.c
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
4 * Copyright IBM Corp. 1999, 2009 5 * Copyright IBM Corp. 1999, 2009
@@ -78,7 +79,6 @@ static struct ccw_driver dasd_fba_driver = {
78 .freeze = dasd_generic_pm_freeze, 79 .freeze = dasd_generic_pm_freeze,
79 .thaw = dasd_generic_restore_device, 80 .thaw = dasd_generic_restore_device,
80 .restore = dasd_generic_restore_device, 81 .restore = dasd_generic_restore_device,
81 .int_class = IRQIO_DAS,
82}; 82};
83 83
84static void 84static void
@@ -479,19 +479,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
479 "No memory to dump sense data"); 479 "No memory to dump sense data");
480 return; 480 return;
481 } 481 }
482 len = sprintf(page, PRINTK_HEADER 482 len = sprintf(page, KERN_ERR PRINTK_HEADER
483 " I/O status report for device %s:\n", 483 " I/O status report for device %s:\n",
484 dev_name(&device->cdev->dev)); 484 dev_name(&device->cdev->dev));
485 len += sprintf(page + len, PRINTK_HEADER 485 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
486 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 486 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
487 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); 487 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
488 len += sprintf(page + len, PRINTK_HEADER 488 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
489 " device %s: Failing CCW: %p\n", 489 " device %s: Failing CCW: %p\n",
490 dev_name(&device->cdev->dev), 490 dev_name(&device->cdev->dev),
491 (void *) (addr_t) irb->scsw.cmd.cpa); 491 (void *) (addr_t) irb->scsw.cmd.cpa);
492 if (irb->esw.esw0.erw.cons) { 492 if (irb->esw.esw0.erw.cons) {
493 for (sl = 0; sl < 4; sl++) { 493 for (sl = 0; sl < 4; sl++) {
494 len += sprintf(page + len, PRINTK_HEADER 494 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
495 " Sense(hex) %2d-%2d:", 495 " Sense(hex) %2d-%2d:",
496 (8 * sl), ((8 * sl) + 7)); 496 (8 * sl), ((8 * sl) + 7));
497 497
@@ -502,7 +502,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
502 len += sprintf(page + len, "\n"); 502 len += sprintf(page + len, "\n");
503 } 503 }
504 } else { 504 } else {
505 len += sprintf(page + len, PRINTK_HEADER 505 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
506 " SORRY - NO VALID SENSE AVAILABLE\n"); 506 " SORRY - NO VALID SENSE AVAILABLE\n");
507 } 507 }
508 printk(KERN_ERR "%s", page); 508 printk(KERN_ERR "%s", page);
@@ -512,9 +512,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
512 act = req->cpaddr; 512 act = req->cpaddr;
513 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 513 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
514 end = min(act + 8, last); 514 end = min(act + 8, last);
515 len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req); 515 len = sprintf(page, KERN_ERR PRINTK_HEADER
516 " Related CP in req: %p\n", req);
516 while (act <= end) { 517 while (act <= end) {
517 len += sprintf(page + len, PRINTK_HEADER 518 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
518 " CCW %p: %08X %08X DAT:", 519 " CCW %p: %08X %08X DAT:",
519 act, ((int *) act)[0], ((int *) act)[1]); 520 act, ((int *) act)[0], ((int *) act)[1]);
520 for (count = 0; count < 32 && count < act->count; 521 for (count = 0; count < 32 && count < act->count;
@@ -532,11 +533,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
532 len = 0; 533 len = 0;
533 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { 534 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
534 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; 535 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
535 len += sprintf(page + len, PRINTK_HEADER "......\n"); 536 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
536 } 537 }
537 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); 538 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
538 while (act <= end) { 539 while (act <= end) {
539 len += sprintf(page + len, PRINTK_HEADER 540 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
540 " CCW %p: %08X %08X DAT:", 541 " CCW %p: %08X %08X DAT:",
541 act, ((int *) act)[0], ((int *) act)[1]); 542 act, ((int *) act)[0], ((int *) act)[1]);
542 for (count = 0; count < 32 && count < act->count; 543 for (count = 0; count < 32 && count < act->count;
@@ -551,10 +552,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
551 /* print last CCWs */ 552 /* print last CCWs */
552 if (act < last - 2) { 553 if (act < last - 2) {
553 act = last - 2; 554 act = last - 2;
554 len += sprintf(page + len, PRINTK_HEADER "......\n"); 555 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
555 } 556 }
556 while (act <= last) { 557 while (act <= last) {
557 len += sprintf(page + len, PRINTK_HEADER 558 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
558 " CCW %p: %08X %08X DAT:", 559 " CCW %p: %08X %08X DAT:",
559 act, ((int *) act)[0], ((int *) act)[1]); 560 act, ((int *) act)[0], ((int *) act)[1]);
560 for (count = 0; count < 32 && count < act->count; 561 for (count = 0; count < 32 && count < act->count;
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index b5d3db0e5ef..14c910baa5f 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.h
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
4 * Copyright IBM Corp. 1999, 2000 5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
5 * 6 *
6 */ 7 */
7 8
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f64921756ad..19a1ff03d65 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_genhd.c
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Carsten Otte <Cotte@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
7 * Copyright IBM Corp. 1999, 2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
8 * 9 *
9 * gendisk related functions for the dasd driver. 10 * gendisk related functions for the dasd driver.
10 * 11 *
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 899e3f5a56e..1dd12bd85a6 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_int.h
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -9,6 +10,8 @@
9#ifndef DASD_INT_H 10#ifndef DASD_INT_H
10#define DASD_INT_H 11#define DASD_INT_H
11 12
13#ifdef __KERNEL__
14
12/* we keep old device allocation scheme; IOW, minors are still in 0..255 */ 15/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
13#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS)) 16#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
14#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1) 17#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
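The two macros above split each minor number into a device index and a partition number. A small sketch of that split; MINORBITS of 20 matches the kernel and DASD_PARTN_BITS of 2 is assumed for the demo:

#include <stdio.h>

/* Assumed values: MINORBITS is 20 in Linux; DASD_PARTN_BITS is a
 * driver constant (2 here for the demo). */
#define MINORBITS       20
#define DASD_PARTN_BITS 2
#define DASD_PER_MAJOR  (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)

int main(void)
{
	unsigned int minor = 42;	/* arbitrary minor number */

	printf("devices per major: %u\n", DASD_PER_MAJOR);
	printf("minor %u -> device index %u, partition %u\n",
	       minor, minor >> DASD_PARTN_BITS, minor & DASD_PARTN_MASK);
	return 0;
}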
@@ -352,7 +355,6 @@ struct dasd_discipline {
352 int (*reload) (struct dasd_device *); 355 int (*reload) (struct dasd_device *);
353 356
354 int (*get_uid) (struct dasd_device *, struct dasd_uid *); 357 int (*get_uid) (struct dasd_device *, struct dasd_uid *);
355 void (*kick_validate) (struct dasd_device *);
356}; 358};
357 359
358extern struct dasd_discipline *dasd_diag_discipline_pointer; 360extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -453,7 +455,6 @@ struct dasd_device {
453 struct work_struct kick_work; 455 struct work_struct kick_work;
454 struct work_struct restore_device; 456 struct work_struct restore_device;
455 struct work_struct reload_device; 457 struct work_struct reload_device;
456 struct work_struct kick_validate;
457 struct timer_list timer; 458 struct timer_list timer;
458 459
459 debug_info_t *debug_area; 460 debug_info_t *debug_area;
@@ -515,9 +516,6 @@ struct dasd_block {
515 */ 516 */
516#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */ 517#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
517#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */ 518#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
518#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
519#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
520#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
521 519
522 520
523void dasd_put_device_wake(struct dasd_device *); 521void dasd_put_device_wake(struct dasd_device *);
@@ -645,7 +643,6 @@ struct dasd_ccw_req *
645dasd_smalloc_request(int , int, int, struct dasd_device *); 643dasd_smalloc_request(int , int, int, struct dasd_device *);
646void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *); 644void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
647void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *); 645void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
648void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
649 646
650static inline int 647static inline int
651dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device) 648dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
@@ -687,7 +684,6 @@ int dasd_generic_set_offline (struct ccw_device *cdev);
687int dasd_generic_notify(struct ccw_device *, int); 684int dasd_generic_notify(struct ccw_device *, int);
688int dasd_generic_last_path_gone(struct dasd_device *); 685int dasd_generic_last_path_gone(struct dasd_device *);
689int dasd_generic_path_operational(struct dasd_device *); 686int dasd_generic_path_operational(struct dasd_device *);
690void dasd_generic_shutdown(struct ccw_device *);
691 687
692void dasd_generic_handle_state_change(struct dasd_device *); 688void dasd_generic_handle_state_change(struct dasd_device *);
693int dasd_generic_pm_freeze(struct ccw_device *); 689int dasd_generic_pm_freeze(struct ccw_device *);
@@ -791,4 +787,6 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
791#define dasd_eer_enabled(d) (0) 787#define dasd_eer_enabled(d) (0)
792#endif /* CONFIG_DASD_ERR */ 788#endif /* CONFIG_DASD_ERR */
793 789
790#endif /* __KERNEL__ */
791
794#endif /* DASD_H */ 792#endif /* DASD_H */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 03c0e044455..f1a2016829f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_ioctl.c
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Carsten Otte <Cotte@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
7 * Copyright IBM Corp. 1999, 2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
8 * 9 *
9 * i/o controls for the dasd driver. 10 * i/o controls for the dasd driver.
10 */ 11 */
@@ -12,14 +13,12 @@
12#define KMSG_COMPONENT "dasd" 13#define KMSG_COMPONENT "dasd"
13 14
14#include <linux/interrupt.h> 15#include <linux/interrupt.h>
15#include <linux/compat.h>
16#include <linux/major.h> 16#include <linux/major.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <asm/compat.h> 20#include <asm/compat.h>
21#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
22#include <asm/schid.h>
23#include <asm/cmb.h> 22#include <asm/cmb.h>
24#include <asm/uaccess.h> 23#include <asm/uaccess.h>
25 24
@@ -293,12 +292,12 @@ out:
293#else 292#else
294static int dasd_ioctl_reset_profile(struct dasd_block *block) 293static int dasd_ioctl_reset_profile(struct dasd_block *block)
295{ 294{
296 return -ENOTTY; 295 return -ENOSYS;
297} 296}
298 297
299static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp) 298static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
300{ 299{
301 return -ENOTTY; 300 return -ENOSYS;
302} 301}
303#endif 302#endif
304 303
@@ -309,12 +308,11 @@ static int dasd_ioctl_information(struct dasd_block *block,
309 unsigned int cmd, void __user *argp) 308 unsigned int cmd, void __user *argp)
310{ 309{
311 struct dasd_information2_t *dasd_info; 310 struct dasd_information2_t *dasd_info;
312 struct subchannel_id sch_id;
313 struct ccw_dev_id dev_id;
314 struct dasd_device *base;
315 struct ccw_device *cdev;
316 unsigned long flags; 311 unsigned long flags;
317 int rc; 312 int rc;
313 struct dasd_device *base;
314 struct ccw_device *cdev;
315 struct ccw_dev_id dev_id;
318 316
319 base = block->base; 317 base = block->base;
320 if (!base->discipline || !base->discipline->fill_info) 318 if (!base->discipline || !base->discipline->fill_info)
@@ -332,10 +330,9 @@ static int dasd_ioctl_information(struct dasd_block *block,
332 330
333 cdev = base->cdev; 331 cdev = base->cdev;
334 ccw_device_get_id(cdev, &dev_id); 332 ccw_device_get_id(cdev, &dev_id);
335 ccw_device_get_schid(cdev, &sch_id);
336 333
337 dasd_info->devno = dev_id.devno; 334 dasd_info->devno = dev_id.devno;
338 dasd_info->schid = sch_id.sch_no; 335 dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
339 dasd_info->cu_type = cdev->id.cu_type; 336 dasd_info->cu_type = cdev->id.cu_type;
340 dasd_info->cu_model = cdev->id.cu_model; 337 dasd_info->cu_model = cdev->id.cu_model;
341 dasd_info->dev_type = cdev->id.dev_type; 338 dasd_info->dev_type = cdev->id.dev_type;
@@ -501,9 +498,12 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
501 break; 498 break;
502 default: 499 default:
503 /* if the discipline has an ioctl method try it. */ 500 /* if the discipline has an ioctl method try it. */
504 rc = -ENOTTY; 501 if (base->discipline->ioctl) {
505 if (base->discipline->ioctl)
506 rc = base->discipline->ioctl(block, cmd, argp); 502 rc = base->discipline->ioctl(block, cmd, argp);
503 if (rc == -ENOIOCTLCMD)
504 rc = -EINVAL;
505 } else
506 rc = -EINVAL;
507 } 507 }
508 dasd_put_device(base); 508 dasd_put_device(base);
509 return rc; 509 return rc;
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 78ac905a5b7..e12989fff4f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * File...........: linux/drivers/s390/block/dasd_proc.c
2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Carsten Otte <Cotte@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
7 * Copyright IBM Corp. 1999, 2002 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
8 * 9 *
9 * /proc interface for the dasd driver. 10 * /proc interface for the dasd driver.
10 * 11 *
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b6ad0de0793..9b43ae94beb 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,7 @@
27 27
28static int dcssblk_open(struct block_device *bdev, fmode_t mode); 28static int dcssblk_open(struct block_device *bdev, fmode_t mode);
29static int dcssblk_release(struct gendisk *disk, fmode_t mode); 29static int dcssblk_release(struct gendisk *disk, fmode_t mode);
30static void dcssblk_make_request(struct request_queue *q, struct bio *bio); 30static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
31static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, 31static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
32 void **kaddr, unsigned long *pfn); 32 void **kaddr, unsigned long *pfn);
33 33
@@ -69,9 +69,23 @@ static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *a
69 size_t count); 69 size_t count);
70static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, 70static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
71 size_t count); 71 size_t count);
72static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf,
73 size_t count);
74static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf);
75static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
76 size_t count);
77static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
78static ssize_t dcssblk_seglist_show(struct device *dev,
79 struct device_attribute *attr,
80 char *buf);
72 81
73static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); 82static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
74static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); 83static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
84static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
85 dcssblk_save_store);
86static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
87 dcssblk_shared_store);
88static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
75 89
76static struct device *dcssblk_root_dev; 90static struct device *dcssblk_root_dev;
77 91
@@ -402,8 +416,6 @@ out:
402 up_write(&dcssblk_devices_sem); 416 up_write(&dcssblk_devices_sem);
403 return rc; 417 return rc;
404} 418}
405static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
406 dcssblk_shared_store);
407 419
408/* 420/*
409 * device attribute for save operation on current copy 421 * device attribute for save operation on current copy
@@ -464,8 +476,6 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
464 up_write(&dcssblk_devices_sem); 476 up_write(&dcssblk_devices_sem);
465 return count; 477 return count;
466} 478}
467static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
468 dcssblk_save_store);
469 479
470/* 480/*
471 * device attribute for showing all segments in a device 481 * device attribute for showing all segments in a device
@@ -492,21 +502,6 @@ dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
492 up_read(&dcssblk_devices_sem); 502 up_read(&dcssblk_devices_sem);
493 return i; 503 return i;
494} 504}
495static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
496
497static struct attribute *dcssblk_dev_attrs[] = {
498 &dev_attr_shared.attr,
499 &dev_attr_save.attr,
500 &dev_attr_seglist.attr,
501 NULL,
502};
503static struct attribute_group dcssblk_dev_attr_group = {
504 .attrs = dcssblk_dev_attrs,
505};
506static const struct attribute_group *dcssblk_dev_attr_groups[] = {
507 &dcssblk_dev_attr_group,
508 NULL,
509};
510 505
511/* 506/*
512 * device attribute for adding devices 507 * device attribute for adding devices
@@ -595,7 +590,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
595 590
596 dev_set_name(&dev_info->dev, dev_info->segment_name); 591 dev_set_name(&dev_info->dev, dev_info->segment_name);
597 dev_info->dev.release = dcssblk_release_segment; 592 dev_info->dev.release = dcssblk_release_segment;
598 dev_info->dev.groups = dcssblk_dev_attr_groups;
599 INIT_LIST_HEAD(&dev_info->lh); 593 INIT_LIST_HEAD(&dev_info->lh);
600 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); 594 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
601 if (dev_info->gd == NULL) { 595 if (dev_info->gd == NULL) {
@@ -643,10 +637,21 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
643 * register the device 637 * register the device
644 */ 638 */
645 rc = device_register(&dev_info->dev); 639 rc = device_register(&dev_info->dev);
640 if (rc) {
641 module_put(THIS_MODULE);
642 goto dev_list_del;
643 }
644 get_device(&dev_info->dev);
645 rc = device_create_file(&dev_info->dev, &dev_attr_shared);
646 if (rc)
647 goto unregister_dev;
648 rc = device_create_file(&dev_info->dev, &dev_attr_save);
649 if (rc)
650 goto unregister_dev;
651 rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
646 if (rc) 652 if (rc)
647 goto put_dev; 653 goto unregister_dev;
648 654
649 get_device(&dev_info->dev);
650 add_disk(dev_info->gd); 655 add_disk(dev_info->gd);
651 656
652 switch (dev_info->segment_type) { 657 switch (dev_info->segment_type) {
@@ -663,11 +668,12 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
663 rc = count; 668 rc = count;
664 goto out; 669 goto out;
665 670
666put_dev: 671unregister_dev:
667 list_del(&dev_info->lh); 672 list_del(&dev_info->lh);
668 blk_cleanup_queue(dev_info->dcssblk_queue); 673 blk_cleanup_queue(dev_info->dcssblk_queue);
669 dev_info->gd->queue = NULL; 674 dev_info->gd->queue = NULL;
670 put_disk(dev_info->gd); 675 put_disk(dev_info->gd);
676 device_unregister(&dev_info->dev);
671 list_for_each_entry(seg_info, &dev_info->seg_list, lh) { 677 list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
672 segment_unload(seg_info->segment_name); 678 segment_unload(seg_info->segment_name);
673 } 679 }
@@ -808,7 +814,7 @@ out:
808 return rc; 814 return rc;
809} 815}
810 816
811static void 817static int
812dcssblk_make_request(struct request_queue *q, struct bio *bio) 818dcssblk_make_request(struct request_queue *q, struct bio *bio)
813{ 819{
814 struct dcssblk_dev_info *dev_info; 820 struct dcssblk_dev_info *dev_info;
@@ -865,9 +871,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
865 bytes_done += bvec->bv_len; 871 bytes_done += bvec->bv_len;
866 } 872 }
867 bio_endio(bio, 0); 873 bio_endio(bio, 0);
868 return; 874 return 0;
869fail: 875fail:
870 bio_io_error(bio); 876 bio_io_error(bio);
877 return 0;
871} 878}
872 879
873static int 880static int
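
The prototype change above (int versus void return for the make_request function) tracks a block-layer interface change; bio-based drivers like dcssblk walk each bio themselves instead of using a request queue. A minimal sketch of that pattern against the older bio API used in this file (bi_sector, bio_endio(bio, error)); my_make_request and dev_base are hypothetical, with private_data assumed to hold a kernel mapping of the backing memory:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/string.h>

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	char *dev_base = bio->bi_bdev->bd_disk->private_data;
	unsigned long addr = (unsigned long) dev_base +
			     ((unsigned long) bio->bi_sector << 9);
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment(bvec, bio, i) {
		char *buf = page_address(bvec->bv_page) + bvec->bv_offset;

		if (bio_data_dir(bio) == READ)
			memcpy(buf, (void *) addr, bvec->bv_len);
		else
			memcpy((void *) addr, buf, bvec->bv_len);
		addr += bvec->bv_len;
	}
	bio_endio(bio, 0);	/* complete the whole bio, no error */
}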
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
deleted file mode 100644
index 9978ad4433c..00000000000
--- a/drivers/s390/block/scm_blk.c
+++ /dev/null
@@ -1,445 +0,0 @@
1/*
2 * Block driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "scm_block"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/interrupt.h>
12#include <linux/spinlock.h>
13#include <linux/module.h>
14#include <linux/blkdev.h>
15#include <linux/genhd.h>
16#include <linux/slab.h>
17#include <linux/list.h>
18#include <asm/eadm.h>
19#include "scm_blk.h"
20
21debug_info_t *scm_debug;
22static int scm_major;
23static DEFINE_SPINLOCK(list_lock);
24static LIST_HEAD(inactive_requests);
25static unsigned int nr_requests = 64;
26static atomic_t nr_devices = ATOMIC_INIT(0);
27module_param(nr_requests, uint, S_IRUGO);
28MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
29
30MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
31MODULE_LICENSE("GPL");
32MODULE_ALIAS("scm:scmdev*");
33
34static void __scm_free_rq(struct scm_request *scmrq)
35{
36 struct aob_rq_header *aobrq = to_aobrq(scmrq);
37
38 free_page((unsigned long) scmrq->aob);
39 free_page((unsigned long) scmrq->aidaw);
40 __scm_free_rq_cluster(scmrq);
41 kfree(aobrq);
42}
43
44static void scm_free_rqs(void)
45{
46 struct list_head *iter, *safe;
47 struct scm_request *scmrq;
48
49 spin_lock_irq(&list_lock);
50 list_for_each_safe(iter, safe, &inactive_requests) {
51 scmrq = list_entry(iter, struct scm_request, list);
52 list_del(&scmrq->list);
53 __scm_free_rq(scmrq);
54 }
55 spin_unlock_irq(&list_lock);
56}
57
58static int __scm_alloc_rq(void)
59{
60 struct aob_rq_header *aobrq;
61 struct scm_request *scmrq;
62
63 aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
64 if (!aobrq)
65 return -ENOMEM;
66
67 scmrq = (void *) aobrq->data;
68 scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
69 scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
70 if (!scmrq->aob || !scmrq->aidaw) {
71 __scm_free_rq(scmrq);
72 return -ENOMEM;
73 }
74
75 if (__scm_alloc_rq_cluster(scmrq)) {
76 __scm_free_rq(scmrq);
77 return -ENOMEM;
78 }
79
80 INIT_LIST_HEAD(&scmrq->list);
81 spin_lock_irq(&list_lock);
82 list_add(&scmrq->list, &inactive_requests);
83 spin_unlock_irq(&list_lock);
84
85 return 0;
86}
87
88static int scm_alloc_rqs(unsigned int nrqs)
89{
90 int ret = 0;
91
92 while (nrqs-- && !ret)
93 ret = __scm_alloc_rq();
94
95 return ret;
96}
97
98static struct scm_request *scm_request_fetch(void)
99{
100 struct scm_request *scmrq = NULL;
101
102 spin_lock(&list_lock);
103 if (list_empty(&inactive_requests))
104 goto out;
105 scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
106 list_del(&scmrq->list);
107out:
108 spin_unlock(&list_lock);
109 return scmrq;
110}
111
112static void scm_request_done(struct scm_request *scmrq)
113{
114 unsigned long flags;
115
116 spin_lock_irqsave(&list_lock, flags);
117 list_add(&scmrq->list, &inactive_requests);
118 spin_unlock_irqrestore(&list_lock, flags);
119}
120
121static int scm_open(struct block_device *blkdev, fmode_t mode)
122{
123 return scm_get_ref();
124}
125
126static int scm_release(struct gendisk *gendisk, fmode_t mode)
127{
128 scm_put_ref();
129 return 0;
130}
131
132static const struct block_device_operations scm_blk_devops = {
133 .owner = THIS_MODULE,
134 .open = scm_open,
135 .release = scm_release,
136};
137
138static void scm_request_prepare(struct scm_request *scmrq)
139{
140 struct scm_blk_dev *bdev = scmrq->bdev;
141 struct scm_device *scmdev = bdev->gendisk->private_data;
142 struct aidaw *aidaw = scmrq->aidaw;
143 struct msb *msb = &scmrq->aob->msb[0];
144 struct req_iterator iter;
145 struct bio_vec *bv;
146
147 msb->bs = MSB_BS_4K;
148 scmrq->aob->request.msb_count = 1;
149 msb->scm_addr = scmdev->address +
150 ((u64) blk_rq_pos(scmrq->request) << 9);
151 msb->oc = (rq_data_dir(scmrq->request) == READ) ?
152 MSB_OC_READ : MSB_OC_WRITE;
153 msb->flags |= MSB_FLAG_IDA;
154 msb->data_addr = (u64) aidaw;
155
156 rq_for_each_segment(bv, scmrq->request, iter) {
157 WARN_ON(bv->bv_offset);
158 msb->blk_count += bv->bv_len >> 12;
159 aidaw->data_addr = (u64) page_address(bv->bv_page);
160 aidaw++;
161 }
162}
163
164static inline void scm_request_init(struct scm_blk_dev *bdev,
165 struct scm_request *scmrq,
166 struct request *req)
167{
168 struct aob_rq_header *aobrq = to_aobrq(scmrq);
169 struct aob *aob = scmrq->aob;
170
171 memset(aob, 0, sizeof(*aob));
172 memset(scmrq->aidaw, 0, PAGE_SIZE);
173 aobrq->scmdev = bdev->scmdev;
174 aob->request.cmd_code = ARQB_CMD_MOVE;
175 aob->request.data = (u64) aobrq;
176 scmrq->request = req;
177 scmrq->bdev = bdev;
178 scmrq->retries = 4;
179 scmrq->error = 0;
180 scm_request_cluster_init(scmrq);
181}
182
183static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
184{
185 if (atomic_read(&bdev->queued_reqs)) {
186 /* Queue restart is triggered by the next interrupt. */
187 return;
188 }
189 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
190}
191
192void scm_request_requeue(struct scm_request *scmrq)
193{
194 struct scm_blk_dev *bdev = scmrq->bdev;
195
196 scm_release_cluster(scmrq);
197 blk_requeue_request(bdev->rq, scmrq->request);
198 scm_request_done(scmrq);
199 scm_ensure_queue_restart(bdev);
200}
201
202void scm_request_finish(struct scm_request *scmrq)
203{
204 scm_release_cluster(scmrq);
205 blk_end_request_all(scmrq->request, scmrq->error);
206 scm_request_done(scmrq);
207}
208
209static void scm_blk_request(struct request_queue *rq)
210{
211 struct scm_device *scmdev = rq->queuedata;
212 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
213 struct scm_request *scmrq;
214 struct request *req;
215 int ret;
216
217 while ((req = blk_peek_request(rq))) {
218 if (req->cmd_type != REQ_TYPE_FS)
219 continue;
220
221 scmrq = scm_request_fetch();
222 if (!scmrq) {
223 SCM_LOG(5, "no request");
224 scm_ensure_queue_restart(bdev);
225 return;
226 }
227 scm_request_init(bdev, scmrq, req);
228 if (!scm_reserve_cluster(scmrq)) {
229 SCM_LOG(5, "cluster busy");
230 scm_request_done(scmrq);
231 return;
232 }
233 if (scm_need_cluster_request(scmrq)) {
234 blk_start_request(req);
235 scm_initiate_cluster_request(scmrq);
236 return;
237 }
238 scm_request_prepare(scmrq);
239 blk_start_request(req);
240
241 ret = scm_start_aob(scmrq->aob);
242 if (ret) {
243 SCM_LOG(5, "no subchannel");
244 scm_request_requeue(scmrq);
245 return;
246 }
247 atomic_inc(&bdev->queued_reqs);
248 }
249}
250
251static void __scmrq_log_error(struct scm_request *scmrq)
252{
253 struct aob *aob = scmrq->aob;
254
255 if (scmrq->error == -ETIMEDOUT)
256 SCM_LOG(1, "Request timeout");
257 else {
258 SCM_LOG(1, "Request error");
259 SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
260 }
261 if (scmrq->retries)
262 SCM_LOG(1, "Retry request");
263 else
264 pr_err("An I/O operation to SCM failed with rc=%d\n",
265 scmrq->error);
266}
267
268void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
269{
270 struct scm_request *scmrq = data;
271 struct scm_blk_dev *bdev = scmrq->bdev;
272
273 scmrq->error = error;
274 if (error)
275 __scmrq_log_error(scmrq);
276
277 spin_lock(&bdev->lock);
278 list_add_tail(&scmrq->list, &bdev->finished_requests);
279 spin_unlock(&bdev->lock);
280 tasklet_hi_schedule(&bdev->tasklet);
281}
282
283static void scm_blk_tasklet(struct scm_blk_dev *bdev)
284{
285 struct scm_request *scmrq;
286 unsigned long flags;
287
288 spin_lock_irqsave(&bdev->lock, flags);
289 while (!list_empty(&bdev->finished_requests)) {
290 scmrq = list_first_entry(&bdev->finished_requests,
291 struct scm_request, list);
292 list_del(&scmrq->list);
293 spin_unlock_irqrestore(&bdev->lock, flags);
294
295 if (scmrq->error && scmrq->retries-- > 0) {
296 if (scm_start_aob(scmrq->aob)) {
297 spin_lock_irqsave(&bdev->rq_lock, flags);
298 scm_request_requeue(scmrq);
299 spin_unlock_irqrestore(&bdev->rq_lock, flags);
300 }
301 /* Request restarted or requeued, handle next. */
302 spin_lock_irqsave(&bdev->lock, flags);
303 continue;
304 }
305
306 if (scm_test_cluster_request(scmrq)) {
307 scm_cluster_request_irq(scmrq);
308 spin_lock_irqsave(&bdev->lock, flags);
309 continue;
310 }
311
312 scm_request_finish(scmrq);
313 atomic_dec(&bdev->queued_reqs);
314 spin_lock_irqsave(&bdev->lock, flags);
315 }
316 spin_unlock_irqrestore(&bdev->lock, flags);
317 /* Look out for more requests. */
318 blk_run_queue(bdev->rq);
319}
320
321int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
322{
323 struct request_queue *rq;
324 int len, ret = -ENOMEM;
325 unsigned int devindex, nr_max_blk;
326
327 devindex = atomic_inc_return(&nr_devices) - 1;
328 /* scma..scmz + scmaa..scmzz */
329 if (devindex > 701) {
330 ret = -ENODEV;
331 goto out;
332 }
333
334 bdev->scmdev = scmdev;
335 spin_lock_init(&bdev->rq_lock);
336 spin_lock_init(&bdev->lock);
337 INIT_LIST_HEAD(&bdev->finished_requests);
338 atomic_set(&bdev->queued_reqs, 0);
339 tasklet_init(&bdev->tasklet,
340 (void (*)(unsigned long)) scm_blk_tasklet,
341 (unsigned long) bdev);
342
343 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
344 if (!rq)
345 goto out;
346
347 bdev->rq = rq;
348 nr_max_blk = min(scmdev->nr_max_block,
349 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
350
351 blk_queue_logical_block_size(rq, 1 << 12);
352 blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
353 blk_queue_max_segments(rq, nr_max_blk);
354 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
355 scm_blk_dev_cluster_setup(bdev);
356
357 bdev->gendisk = alloc_disk(SCM_NR_PARTS);
358 if (!bdev->gendisk)
359 goto out_queue;
360
361 rq->queuedata = scmdev;
362 bdev->gendisk->driverfs_dev = &scmdev->dev;
363 bdev->gendisk->private_data = scmdev;
364 bdev->gendisk->fops = &scm_blk_devops;
365 bdev->gendisk->queue = rq;
366 bdev->gendisk->major = scm_major;
367 bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
368
369 len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
370 if (devindex > 25) {
371 len += snprintf(bdev->gendisk->disk_name + len,
372 DISK_NAME_LEN - len, "%c",
373 'a' + (devindex / 26) - 1);
374 devindex = devindex % 26;
375 }
376 snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
377 'a' + devindex);
378
379 /* 512 byte sectors */
380 set_capacity(bdev->gendisk, scmdev->size >> 9);
381 add_disk(bdev->gendisk);
382 return 0;
383
384out_queue:
385 blk_cleanup_queue(rq);
386out:
387 atomic_dec(&nr_devices);
388 return ret;
389}
390
391void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
392{
393 tasklet_kill(&bdev->tasklet);
394 del_gendisk(bdev->gendisk);
395 blk_cleanup_queue(bdev->gendisk->queue);
396 put_disk(bdev->gendisk);
397}
398
399static int __init scm_blk_init(void)
400{
401 int ret = -EINVAL;
402
403 if (!scm_cluster_size_valid())
404 goto out;
405
406 ret = register_blkdev(0, "scm");
407 if (ret < 0)
408 goto out;
409
410 scm_major = ret;
411 if (scm_alloc_rqs(nr_requests))
412 goto out_unreg;
413
414 scm_debug = debug_register("scm_log", 16, 1, 16);
415 if (!scm_debug)
416 goto out_free;
417
418 debug_register_view(scm_debug, &debug_hex_ascii_view);
419 debug_set_level(scm_debug, 2);
420
421 ret = scm_drv_init();
422 if (ret)
423 goto out_dbf;
424
425 return ret;
426
427out_dbf:
428 debug_unregister(scm_debug);
429out_free:
430 scm_free_rqs();
431out_unreg:
432 unregister_blkdev(scm_major, "scm");
433out:
434 return ret;
435}
436module_init(scm_blk_init);
437
438static void __exit scm_blk_cleanup(void)
439{
440 scm_drv_cleanup();
441 debug_unregister(scm_debug);
442 scm_free_rqs();
443 unregister_blkdev(scm_major, "scm");
444}
445module_exit(scm_blk_cleanup);
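
scm_blk.c above preallocates nr_requests request structures at module load and recycles them through the spinlock-guarded inactive_requests list, so the I/O path itself never allocates. A generic standalone sketch of that pool pattern (names hypothetical, payload omitted):

#include <linux/list.h>
#include <linux/spinlock.h>

struct pool_item {
	struct list_head list;
	/* request payload would live here */
};

static DEFINE_SPINLOCK(pool_lock);
static LIST_HEAD(pool_free);

static struct pool_item *pool_fetch(void)
{
	struct pool_item *item = NULL;

	spin_lock(&pool_lock);
	if (!list_empty(&pool_free)) {
		item = list_first_entry(&pool_free, struct pool_item, list);
		list_del(&item->list);
	}
	spin_unlock(&pool_lock);
	return item;	/* NULL means "all requests in flight" */
}

static void pool_put(struct pool_item *item)
{
	unsigned long flags;

	/* may be called from interrupt context, hence irqsave */
	spin_lock_irqsave(&pool_lock, flags);
	list_add(&item->list, &pool_free);
	spin_unlock_irqrestore(&pool_lock, flags);
}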
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
deleted file mode 100644
index 7ac6bad919e..00000000000
--- a/drivers/s390/block/scm_blk.h
+++ /dev/null
@@ -1,117 +0,0 @@
1#ifndef SCM_BLK_H
2#define SCM_BLK_H
3
4#include <linux/interrupt.h>
5#include <linux/spinlock.h>
6#include <linux/blkdev.h>
7#include <linux/genhd.h>
8#include <linux/list.h>
9
10#include <asm/debug.h>
11#include <asm/eadm.h>
12
13#define SCM_NR_PARTS 8
14#define SCM_QUEUE_DELAY 5
15
16struct scm_blk_dev {
17 struct tasklet_struct tasklet;
18 struct request_queue *rq;
19 struct gendisk *gendisk;
20 struct scm_device *scmdev;
21 spinlock_t rq_lock; /* guard the request queue */
22 spinlock_t lock; /* guard the rest of the blockdev */
23 atomic_t queued_reqs;
24 struct list_head finished_requests;
25#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
26 struct list_head cluster_list;
27#endif
28};
29
30struct scm_request {
31 struct scm_blk_dev *bdev;
32 struct request *request;
33 struct aidaw *aidaw;
34 struct aob *aob;
35 struct list_head list;
36 u8 retries;
37 int error;
38#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
39 struct {
40 enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
41 struct list_head list;
42 void **buf;
43 } cluster;
44#endif
45};
46
47#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
48
49int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
50void scm_blk_dev_cleanup(struct scm_blk_dev *);
51void scm_blk_irq(struct scm_device *, void *, int);
52
53void scm_request_finish(struct scm_request *);
54void scm_request_requeue(struct scm_request *);
55
56int scm_drv_init(void);
57void scm_drv_cleanup(void);
58
59#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
60void __scm_free_rq_cluster(struct scm_request *);
61int __scm_alloc_rq_cluster(struct scm_request *);
62void scm_request_cluster_init(struct scm_request *);
63bool scm_reserve_cluster(struct scm_request *);
64void scm_release_cluster(struct scm_request *);
65void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
66bool scm_need_cluster_request(struct scm_request *);
67void scm_initiate_cluster_request(struct scm_request *);
68void scm_cluster_request_irq(struct scm_request *);
69bool scm_test_cluster_request(struct scm_request *);
70bool scm_cluster_size_valid(void);
71#else
72#define __scm_free_rq_cluster(scmrq) {}
73#define __scm_alloc_rq_cluster(scmrq) 0
74#define scm_request_cluster_init(scmrq) {}
75#define scm_reserve_cluster(scmrq) true
76#define scm_release_cluster(scmrq) {}
77#define scm_blk_dev_cluster_setup(bdev) {}
78#define scm_need_cluster_request(scmrq) false
79#define scm_initiate_cluster_request(scmrq) {}
80#define scm_cluster_request_irq(scmrq) {}
81#define scm_test_cluster_request(scmrq) false
82#define scm_cluster_size_valid() true
83#endif
84
85extern debug_info_t *scm_debug;
86
87#define SCM_LOG(imp, txt) do { \
88 debug_text_event(scm_debug, imp, txt); \
89 } while (0)
90
91static inline void SCM_LOG_HEX(int level, void *data, int length)
92{
93 if (level > scm_debug->level)
94 return;
95 while (length > 0) {
96 debug_event(scm_debug, level, data, length);
97 length -= scm_debug->buf_size;
98 data += scm_debug->buf_size;
99 }
100}
101
102static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
103{
104 struct {
105 u64 address;
106 u8 oper_state;
107 u8 rank;
108 } __packed data = {
109 .address = scmdev->address,
110 .oper_state = scmdev->attrs.oper_state,
111 .rank = scmdev->attrs.rank,
112 };
113
114 SCM_LOG_HEX(level, &data, sizeof(data));
115}
116
117#endif /* SCM_BLK_H */
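
The to_aobrq() macro above recovers the enclosing aob_rq_header from a pointer to its data[] payload. A runnable userspace sketch of the same back-pointer trick (simplified types, not the kernel definitions):

#include <stddef.h>
#include <stdio.h>

struct header {
	int owner_tag;
	char data[8];	/* payload area, like aob_rq_header's data[] */
};

/* container_of-style back-pointer: payload address -> enclosing struct */
#define to_header(p) \
	((struct header *) ((char *) (p) - offsetof(struct header, data)))

int main(void)
{
	struct header h = { .owner_tag = 42 };
	void *payload = h.data;

	printf("%d\n", to_header(payload)->owner_tag);	/* prints 42 */
	return 0;
}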
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
deleted file mode 100644
index f4bb61b0cea..00000000000
--- a/drivers/s390/block/scm_blk_cluster.c
+++ /dev/null
@@ -1,228 +0,0 @@
1/*
2 * Block driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <linux/blkdev.h>
11#include <linux/genhd.h>
12#include <linux/slab.h>
13#include <linux/list.h>
14#include <asm/eadm.h>
15#include "scm_blk.h"
16
17static unsigned int write_cluster_size = 64;
18module_param(write_cluster_size, uint, S_IRUGO);
19MODULE_PARM_DESC(write_cluster_size,
20 "Number of pages used for contiguous writes.");
21
22#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)
23
24void __scm_free_rq_cluster(struct scm_request *scmrq)
25{
26 int i;
27
28 if (!scmrq->cluster.buf)
29 return;
30
31 for (i = 0; i < 2 * write_cluster_size; i++)
32 free_page((unsigned long) scmrq->cluster.buf[i]);
33
34 kfree(scmrq->cluster.buf);
35}
36
37int __scm_alloc_rq_cluster(struct scm_request *scmrq)
38{
39 int i;
40
41 scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
42 GFP_KERNEL);
43 if (!scmrq->cluster.buf)
44 return -ENOMEM;
45
46 for (i = 0; i < 2 * write_cluster_size; i++) {
47 scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
48 if (!scmrq->cluster.buf[i])
49 return -ENOMEM;
50 }
51 INIT_LIST_HEAD(&scmrq->cluster.list);
52 return 0;
53}
54
55void scm_request_cluster_init(struct scm_request *scmrq)
56{
57 scmrq->cluster.state = CLUSTER_NONE;
58}
59
60static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
61{
62 unsigned long firstA, lastA, firstB, lastB;
63
64 firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
65 lastA = (((u64) blk_rq_pos(A->request) << 9) +
66 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
67
68 firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
69 lastB = (((u64) blk_rq_pos(B->request) << 9) +
70 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
71
72 return (firstB <= lastA && firstA <= lastB);
73}
74
75bool scm_reserve_cluster(struct scm_request *scmrq)
76{
77 struct scm_blk_dev *bdev = scmrq->bdev;
78 struct scm_request *iter;
79
80 if (write_cluster_size == 0)
81 return true;
82
83 spin_lock(&bdev->lock);
84 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
85 if (clusters_intersect(scmrq, iter) &&
86 (rq_data_dir(scmrq->request) == WRITE ||
87 rq_data_dir(iter->request) == WRITE)) {
88 spin_unlock(&bdev->lock);
89 return false;
90 }
91 }
92 list_add(&scmrq->cluster.list, &bdev->cluster_list);
93 spin_unlock(&bdev->lock);
94
95 return true;
96}
97
98void scm_release_cluster(struct scm_request *scmrq)
99{
100 struct scm_blk_dev *bdev = scmrq->bdev;
101 unsigned long flags;
102
103 if (write_cluster_size == 0)
104 return;
105
106 spin_lock_irqsave(&bdev->lock, flags);
107 list_del(&scmrq->cluster.list);
108 spin_unlock_irqrestore(&bdev->lock, flags);
109}
110
111void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
112{
113 INIT_LIST_HEAD(&bdev->cluster_list);
114 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
115}
116
117static void scm_prepare_cluster_request(struct scm_request *scmrq)
118{
119 struct scm_blk_dev *bdev = scmrq->bdev;
120 struct scm_device *scmdev = bdev->gendisk->private_data;
121 struct request *req = scmrq->request;
122 struct aidaw *aidaw = scmrq->aidaw;
123 struct msb *msb = &scmrq->aob->msb[0];
124 struct req_iterator iter;
125 struct bio_vec *bv;
126 int i = 0;
127 u64 addr;
128
129 switch (scmrq->cluster.state) {
130 case CLUSTER_NONE:
131 scmrq->cluster.state = CLUSTER_READ;
132 /* fall through */
133 case CLUSTER_READ:
134 scmrq->aob->request.msb_count = 1;
135 msb->bs = MSB_BS_4K;
136 msb->oc = MSB_OC_READ;
137 msb->flags = MSB_FLAG_IDA;
138 msb->data_addr = (u64) aidaw;
139 msb->blk_count = write_cluster_size;
140
141 addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
142 msb->scm_addr = round_down(addr, CLUSTER_SIZE);
143
144 if (msb->scm_addr !=
145 round_down(addr + (u64) blk_rq_bytes(req) - 1,
146 CLUSTER_SIZE))
147 msb->blk_count = 2 * write_cluster_size;
148
149 for (i = 0; i < msb->blk_count; i++) {
150 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
151 aidaw++;
152 }
153
154 break;
155 case CLUSTER_WRITE:
156 msb->oc = MSB_OC_WRITE;
157
158 for (addr = msb->scm_addr;
159 addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
160 addr += PAGE_SIZE) {
161 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
162 aidaw++;
163 i++;
164 }
165 rq_for_each_segment(bv, req, iter) {
166 aidaw->data_addr = (u64) page_address(bv->bv_page);
167 aidaw++;
168 i++;
169 }
170 for (; i < msb->blk_count; i++) {
171 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
172 aidaw++;
173 }
174 break;
175 }
176}
177
178bool scm_need_cluster_request(struct scm_request *scmrq)
179{
180 if (rq_data_dir(scmrq->request) == READ)
181 return false;
182
183 return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
184}
185
186/* Called with queue lock held. */
187void scm_initiate_cluster_request(struct scm_request *scmrq)
188{
189 scm_prepare_cluster_request(scmrq);
190 if (scm_start_aob(scmrq->aob))
191 scm_request_requeue(scmrq);
192}
193
194bool scm_test_cluster_request(struct scm_request *scmrq)
195{
196 return scmrq->cluster.state != CLUSTER_NONE;
197}
198
199void scm_cluster_request_irq(struct scm_request *scmrq)
200{
201 struct scm_blk_dev *bdev = scmrq->bdev;
202 unsigned long flags;
203
204 switch (scmrq->cluster.state) {
205 case CLUSTER_NONE:
206 BUG();
207 break;
208 case CLUSTER_READ:
209 if (scmrq->error) {
210 scm_request_finish(scmrq);
211 break;
212 }
213 scmrq->cluster.state = CLUSTER_WRITE;
214 spin_lock_irqsave(&bdev->rq_lock, flags);
215 scm_initiate_cluster_request(scmrq);
216 spin_unlock_irqrestore(&bdev->rq_lock, flags);
217 break;
218 case CLUSTER_WRITE:
219 scm_request_finish(scmrq);
220 break;
221 }
222}
223
224bool scm_cluster_size_valid(void)
225{
226 return write_cluster_size == 0 || write_cluster_size == 32 ||
227 write_cluster_size == 64 || write_cluster_size == 128;
228}
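
clusters_intersect() above is the standard closed-interval overlap test applied to cluster indices: two ranges intersect iff each begins no later than the other ends. A standalone sketch (CLUSTER_SIZE fixed to the default write_cluster_size of 64 pages; names hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define CLUSTER_SIZE (64 * 4096UL)	/* default write_cluster_size */

static bool ranges_intersect(uint64_t posA, uint64_t lenA,
			     uint64_t posB, uint64_t lenB)
{
	/* convert byte ranges to first/last cluster indices */
	uint64_t firstA = posA / CLUSTER_SIZE;
	uint64_t lastA  = (posA + lenA - 1) / CLUSTER_SIZE;
	uint64_t firstB = posB / CLUSTER_SIZE;
	uint64_t lastB  = (posB + lenB - 1) / CLUSTER_SIZE;

	return firstB <= lastA && firstA <= lastB;
}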
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
deleted file mode 100644
index 9fa0a908607..00000000000
--- a/drivers/s390/block/scm_drv.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Device driver for s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#define KMSG_COMPONENT "scm_block"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <asm/eadm.h>
14#include "scm_blk.h"
15
16static void notify(struct scm_device *scmdev)
17{
18 pr_info("%lu: The capabilities of the SCM increment changed\n",
19 (unsigned long) scmdev->address);
20 SCM_LOG(2, "State changed");
21 SCM_LOG_STATE(2, scmdev);
22}
23
24static int scm_probe(struct scm_device *scmdev)
25{
26 struct scm_blk_dev *bdev;
27 int ret;
28
29 SCM_LOG(2, "probe");
30 SCM_LOG_STATE(2, scmdev);
31
32 if (scmdev->attrs.oper_state != OP_STATE_GOOD)
33 return -EINVAL;
34
35 bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
36 if (!bdev)
37 return -ENOMEM;
38
39 dev_set_drvdata(&scmdev->dev, bdev);
40 ret = scm_blk_dev_setup(bdev, scmdev);
41 if (ret) {
42 dev_set_drvdata(&scmdev->dev, NULL);
43 kfree(bdev);
44 goto out;
45 }
46
47out:
48 return ret;
49}
50
51static int scm_remove(struct scm_device *scmdev)
52{
53 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
54
55 scm_blk_dev_cleanup(bdev);
56 dev_set_drvdata(&scmdev->dev, NULL);
57 kfree(bdev);
58
59 return 0;
60}
61
62static struct scm_driver scm_drv = {
63 .drv = {
64 .name = "scm_block",
65 .owner = THIS_MODULE,
66 },
67 .notify = notify,
68 .probe = scm_probe,
69 .remove = scm_remove,
70 .handler = scm_blk_irq,
71};
72
73int __init scm_drv_init(void)
74{
75 return scm_driver_register(&scm_drv);
76}
77
78void scm_drv_cleanup(void)
79{
80 scm_driver_unregister(&scm_drv);
81}
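
scm_drv.c pairs dev_set_drvdata() in probe with dev_get_drvdata() in remove so the per-device state travels with the struct device. A minimal sketch of that pairing (my_state and the sketch_* names are placeholders, not this driver's API):

#include <linux/device.h>
#include <linux/slab.h>

struct my_state {
	int dummy;	/* per-device driver state would live here */
};

static int sketch_probe(struct device *dev)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	dev_set_drvdata(dev, st);	/* park state in the device */
	return 0;
}

static int sketch_remove(struct device *dev)
{
	struct my_state *st = dev_get_drvdata(dev);

	dev_set_drvdata(dev, NULL);
	kfree(st);
	return 0;
}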
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 690c3338a8a..1f6a4d894e7 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -36,7 +36,7 @@
36#include <linux/blkdev.h> 36#include <linux/blkdev.h>
37#include <linux/blkpg.h> 37#include <linux/blkpg.h>
38#include <linux/hdreg.h> /* HDIO_GETGEO */ 38#include <linux/hdreg.h> /* HDIO_GETGEO */
39#include <linux/device.h> 39#include <linux/sysdev.h>
40#include <linux/bio.h> 40#include <linux/bio.h>
41#include <linux/suspend.h> 41#include <linux/suspend.h>
42#include <linux/platform_device.h> 42#include <linux/platform_device.h>
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
181/* 181/*
182 * Block device make request function. 182 * Block device make request function.
183 */ 183 */
184static void xpram_make_request(struct request_queue *q, struct bio *bio) 184static int xpram_make_request(struct request_queue *q, struct bio *bio)
185{ 185{
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
187 struct bio_vec *bvec; 187 struct bio_vec *bvec;
@@ -221,9 +221,10 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
221 } 221 }
222 set_bit(BIO_UPTODATE, &bio->bi_flags); 222 set_bit(BIO_UPTODATE, &bio->bi_flags);
223 bio_endio(bio, 0); 223 bio_endio(bio, 0);
224 return; 224 return 0;
225fail: 225fail:
226 bio_io_error(bio); 226 bio_io_error(bio);
227 return 0;
227} 228}
228 229
229static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) 230static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
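
xpram_getgeo(), whose body lies outside this hunk, follows the usual HDIO_GETGEO pattern for RAM-backed disks: fabricate a plausible geometry whose heads * sectors * cylinders product matches the capacity. A sketch of that pattern (not the actual xpram body):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>

static int sketch_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	unsigned long sectors = get_capacity(bdev->bd_disk);

	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = sectors / (geo->heads * geo->sectors);
	geo->start = 0;
	return 0;
}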
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 33b7141a182..694464c65fc 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -9,6 +9,7 @@
9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> 9 * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu>
10 */ 10 */
11 11
12#include <linux/kernel_stat.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/types.h> 14#include <linux/types.h>
14#include <linux/kdev_t.h> 15#include <linux/kdev_t.h>
@@ -20,7 +21,6 @@
20#include <linux/interrupt.h> 21#include <linux/interrupt.h>
21#include <linux/err.h> 22#include <linux/err.h>
22#include <linux/reboot.h> 23#include <linux/reboot.h>
23#include <linux/serial.h> /* ASYNC_* flags */
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <asm/ccwdev.h> 25#include <asm/ccwdev.h>
26#include <asm/cio.h> 26#include <asm/cio.h>
@@ -45,11 +45,14 @@
45#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ 45#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
46 46
47#define RAW3215_FIXED 1 /* 3215 console device is not to be freed */ 47#define RAW3215_FIXED 1 /* 3215 console device is not to be freed */
48#define RAW3215_ACTIVE 2 /* set if the device is in use */
48#define RAW3215_WORKING 4 /* set if a request is being worked on */ 49#define RAW3215_WORKING 4 /* set if a request is being worked on */
49#define RAW3215_THROTTLED 8 /* set if reading is disabled */ 50#define RAW3215_THROTTLED 8 /* set if reading is disabled */
50#define RAW3215_STOPPED 16 /* set if writing is disabled */ 51#define RAW3215_STOPPED 16 /* set if writing is disabled */
52#define RAW3215_CLOSING 32 /* set while in close process */
51#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ 53#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
52#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ 54#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
55#define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */
53 56
54#define TAB_STOP_SIZE 8 /* tab stop size */ 57#define TAB_STOP_SIZE 8 /* tab stop size */
55 58
@@ -74,7 +77,6 @@ struct raw3215_req {
74} __attribute__ ((aligned(8))); 77} __attribute__ ((aligned(8)));
75 78
76struct raw3215_info { 79struct raw3215_info {
77 struct tty_port port;
78 struct ccw_device *cdev; /* device for tty driver */ 80 struct ccw_device *cdev; /* device for tty driver */
79 spinlock_t *lock; /* pointer to irq lock */ 81 spinlock_t *lock; /* pointer to irq lock */
80 int flags; /* state flags */ 82 int flags; /* state flags */
@@ -83,9 +85,9 @@ struct raw3215_info {
83 int head; /* first free byte in output buffer */ 85 int head; /* first free byte in output buffer */
84 int count; /* number of bytes in output buffer */ 86 int count; /* number of bytes in output buffer */
85 int written; /* number of bytes in write requests */ 87 int written; /* number of bytes in write requests */
88 struct tty_struct *tty; /* pointer to tty structure if present */
86 struct raw3215_req *queued_read; /* pointer to queued read requests */ 89 struct raw3215_req *queued_read; /* pointer to queued read requests */
87 struct raw3215_req *queued_write;/* pointer to queued write requests */ 90 struct raw3215_req *queued_write;/* pointer to queued write requests */
88 struct tasklet_struct tlet; /* tasklet to invoke tty_wakeup */
89 wait_queue_head_t empty_wait; /* wait queue for flushing */ 91 wait_queue_head_t empty_wait; /* wait queue for flushing */
90 struct timer_list timer; /* timer for delayed output */ 92 struct timer_list timer; /* timer for delayed output */
91 int line_pos; /* position on the line (for tabs) */ 93 int line_pos; /* position on the line (for tabs) */
@@ -291,7 +293,7 @@ static void raw3215_timeout(unsigned long __data)
291 if (raw->flags & RAW3215_TIMER_RUNS) { 293 if (raw->flags & RAW3215_TIMER_RUNS) {
292 del_timer(&raw->timer); 294 del_timer(&raw->timer);
293 raw->flags &= ~RAW3215_TIMER_RUNS; 295 raw->flags &= ~RAW3215_TIMER_RUNS;
294 if (!(raw->port.flags & ASYNC_SUSPENDED)) { 296 if (!(raw->flags & RAW3215_FROZEN)) {
295 raw3215_mk_write_req(raw); 297 raw3215_mk_write_req(raw);
296 raw3215_start_io(raw); 298 raw3215_start_io(raw);
297 } 299 }
@@ -307,8 +309,7 @@ static void raw3215_timeout(unsigned long __data)
307 */ 309 */
308static inline void raw3215_try_io(struct raw3215_info *raw) 310static inline void raw3215_try_io(struct raw3215_info *raw)
309{ 311{
310 if (!(raw->port.flags & ASYNC_INITIALIZED) || 312 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN))
311 (raw->port.flags & ASYNC_SUSPENDED))
312 return; 313 return;
313 if (raw->queued_read != NULL) 314 if (raw->queued_read != NULL)
314 raw3215_start_io(raw); 315 raw3215_start_io(raw);
@@ -323,7 +324,10 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
323 } 324 }
324 } else if (!(raw->flags & RAW3215_TIMER_RUNS)) { 325 } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
325 /* delay small writes */ 326 /* delay small writes */
327 init_timer(&raw->timer);
326 raw->timer.expires = RAW3215_TIMEOUT + jiffies; 328 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
329 raw->timer.data = (unsigned long) raw;
330 raw->timer.function = raw3215_timeout;
327 add_timer(&raw->timer); 331 add_timer(&raw->timer);
328 raw->flags |= RAW3215_TIMER_RUNS; 332 raw->flags |= RAW3215_TIMER_RUNS;
329 } 333 }
@@ -331,29 +335,19 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
331} 335}
332 336
333/* 337/*
334 * Call tty_wakeup from tasklet context 338 * Try to start the next IO and wake up processes waiting on the tty.
335 */ 339 */
336static void raw3215_wakeup(unsigned long data) 340static void raw3215_next_io(struct raw3215_info *raw)
337{ 341{
338 struct raw3215_info *raw = (struct raw3215_info *) data;
339 struct tty_struct *tty; 342 struct tty_struct *tty;
340 343
341 tty = tty_port_tty_get(&raw->port);
342 if (tty) {
343 tty_wakeup(tty);
344 tty_kref_put(tty);
345 }
346}
347
348/*
349 * Try to start the next IO and wake up processes waiting on the tty.
350 */
351static void raw3215_next_io(struct raw3215_info *raw, struct tty_struct *tty)
352{
353 raw3215_mk_write_req(raw); 344 raw3215_mk_write_req(raw);
354 raw3215_try_io(raw); 345 raw3215_try_io(raw);
355 if (tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) 346 tty = raw->tty;
356 tasklet_schedule(&raw->tlet); 347 if (tty != NULL &&
348 RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
349 tty_wakeup(tty);
350 }
357} 351}
358 352
359/* 353/*
@@ -368,13 +362,13 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
368 int cstat, dstat; 362 int cstat, dstat;
369 int count; 363 int count;
370 364
365 kstat_cpu(smp_processor_id()).irqs[IOINT_C15]++;
371 raw = dev_get_drvdata(&cdev->dev); 366 raw = dev_get_drvdata(&cdev->dev);
372 req = (struct raw3215_req *) intparm; 367 req = (struct raw3215_req *) intparm;
373 tty = tty_port_tty_get(&raw->port);
374 cstat = irb->scsw.cmd.cstat; 368 cstat = irb->scsw.cmd.cstat;
375 dstat = irb->scsw.cmd.dstat; 369 dstat = irb->scsw.cmd.dstat;
376 if (cstat != 0) 370 if (cstat != 0)
377 raw3215_next_io(raw, tty); 371 raw3215_next_io(raw);
378 if (dstat & 0x01) { /* we got a unit exception */ 372 if (dstat & 0x01) { /* we got a unit exception */
379 dstat &= ~0x01; /* we can ignore it */ 373 dstat &= ~0x01; /* we can ignore it */
380 } 374 }
@@ -384,13 +378,13 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
384 break; 378 break;
385 /* Attention interrupt, someone hit the enter key */ 379 /* Attention interrupt, someone hit the enter key */
386 raw3215_mk_read_req(raw); 380 raw3215_mk_read_req(raw);
387 raw3215_next_io(raw, tty); 381 raw3215_next_io(raw);
388 break; 382 break;
389 case 0x08: 383 case 0x08:
390 case 0x0C: 384 case 0x0C:
391 /* Channel end interrupt. */ 385 /* Channel end interrupt. */
392 if ((raw = req->info) == NULL) 386 if ((raw = req->info) == NULL)
393 goto put_tty; /* That shouldn't happen ... */ 387 return; /* That shouldn't happen ... */
394 if (req->type == RAW3215_READ) { 388 if (req->type == RAW3215_READ) {
395 /* store residual count, then wait for device end */ 389 /* store residual count, then wait for device end */
396 req->residual = irb->scsw.cmd.count; 390 req->residual = irb->scsw.cmd.count;
@@ -400,10 +394,11 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
400 case 0x04: 394 case 0x04:
401 /* Device end interrupt. */ 395 /* Device end interrupt. */
402 if ((raw = req->info) == NULL) 396 if ((raw = req->info) == NULL)
403 goto put_tty; /* That shouldn't happen ... */ 397 return; /* That shouldn't happen ... */
404 if (req->type == RAW3215_READ && tty != NULL) { 398 if (req->type == RAW3215_READ && raw->tty != NULL) {
405 unsigned int cchar; 399 unsigned int cchar;
406 400
401 tty = raw->tty;
407 count = 160 - req->residual; 402 count = 160 - req->residual;
408 EBCASC(raw->inbuf, count); 403 EBCASC(raw->inbuf, count);
409 cchar = ctrlchar_handle(raw->inbuf, count, tty); 404 cchar = ctrlchar_handle(raw->inbuf, count, tty);
@@ -413,7 +408,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
413 408
414 case CTRLCHAR_CTRL: 409 case CTRLCHAR_CTRL:
415 tty_insert_flip_char(tty, cchar, TTY_NORMAL); 410 tty_insert_flip_char(tty, cchar, TTY_NORMAL);
416 tty_flip_buffer_push(tty); 411 tty_flip_buffer_push(raw->tty);
417 break; 412 break;
418 413
419 case CTRLCHAR_NONE: 414 case CTRLCHAR_NONE:
@@ -426,7 +421,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
426 } else 421 } else
427 count -= 2; 422 count -= 2;
428 tty_insert_flip_string(tty, raw->inbuf, count); 423 tty_insert_flip_string(tty, raw->inbuf, count);
429 tty_flip_buffer_push(tty); 424 tty_flip_buffer_push(raw->tty);
430 break; 425 break;
431 } 426 }
432 } else if (req->type == RAW3215_WRITE) { 427 } else if (req->type == RAW3215_WRITE) {
@@ -441,7 +436,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
441 raw->queued_read == NULL) { 436 raw->queued_read == NULL) {
442 wake_up_interruptible(&raw->empty_wait); 437 wake_up_interruptible(&raw->empty_wait);
443 } 438 }
444 raw3215_next_io(raw, tty); 439 raw3215_next_io(raw);
445 break; 440 break;
446 default: 441 default:
447 /* Strange interrupt, I'll do my best to clean up */ 442 /* Strange interrupt, I'll do my best to clean up */
@@ -453,10 +448,9 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
453 raw->flags &= ~RAW3215_WORKING; 448 raw->flags &= ~RAW3215_WORKING;
454 raw3215_free_req(req); 449 raw3215_free_req(req);
455 } 450 }
456 raw3215_next_io(raw, tty); 451 raw3215_next_io(raw);
457 } 452 }
458put_tty: 453 return;
459 tty_kref_put(tty);
460} 454}
461 455
462/* 456/*
@@ -490,7 +484,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
490 /* While console is frozen for suspend we have no other 484 /* While console is frozen for suspend we have no other
491 * choice but to drop message from the buffer to make 485 * choice but to drop message from the buffer to make
492 * room for even more messages. */ 486 * room for even more messages. */
493 if (raw->port.flags & ASYNC_SUSPENDED) { 487 if (raw->flags & RAW3215_FROZEN) {
494 raw3215_drop_line(raw); 488 raw3215_drop_line(raw);
495 continue; 489 continue;
496 } 490 }
@@ -612,10 +606,10 @@ static int raw3215_startup(struct raw3215_info *raw)
612{ 606{
613 unsigned long flags; 607 unsigned long flags;
614 608
615 if (raw->port.flags & ASYNC_INITIALIZED) 609 if (raw->flags & RAW3215_ACTIVE)
616 return 0; 610 return 0;
617 raw->line_pos = 0; 611 raw->line_pos = 0;
618 raw->port.flags |= ASYNC_INITIALIZED; 612 raw->flags |= RAW3215_ACTIVE;
619 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 613 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
620 raw3215_try_io(raw); 614 raw3215_try_io(raw);
621 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 615 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -631,15 +625,14 @@ static void raw3215_shutdown(struct raw3215_info *raw)
631 DECLARE_WAITQUEUE(wait, current); 625 DECLARE_WAITQUEUE(wait, current);
632 unsigned long flags; 626 unsigned long flags;
633 627
634 if (!(raw->port.flags & ASYNC_INITIALIZED) || 628 if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
635 (raw->flags & RAW3215_FIXED))
636 return; 629 return;
637 /* Wait for outstanding requests, then free irq */ 630 /* Wait for outstanding requests, then free irq */
638 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 631 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
639 if ((raw->flags & RAW3215_WORKING) || 632 if ((raw->flags & RAW3215_WORKING) ||
640 raw->queued_write != NULL || 633 raw->queued_write != NULL ||
641 raw->queued_read != NULL) { 634 raw->queued_read != NULL) {
642 raw->port.flags |= ASYNC_CLOSING; 635 raw->flags |= RAW3215_CLOSING;
643 add_wait_queue(&raw->empty_wait, &wait); 636 add_wait_queue(&raw->empty_wait, &wait);
644 set_current_state(TASK_INTERRUPTIBLE); 637 set_current_state(TASK_INTERRUPTIBLE);
645 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 638 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -647,42 +640,11 @@ static void raw3215_shutdown(struct raw3215_info *raw)
647 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 640 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
648 remove_wait_queue(&raw->empty_wait, &wait); 641 remove_wait_queue(&raw->empty_wait, &wait);
649 set_current_state(TASK_RUNNING); 642 set_current_state(TASK_RUNNING);
650 raw->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING); 643 raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
651 } 644 }
652 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 645 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
653} 646}
654 647
655static struct raw3215_info *raw3215_alloc_info(void)
656{
657 struct raw3215_info *info;
658
659 info = kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
660 if (!info)
661 return NULL;
662
663 info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
664 info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
665 if (!info->buffer || !info->inbuf) {
666 kfree(info);
667 return NULL;
668 }
669
670 setup_timer(&info->timer, raw3215_timeout, (unsigned long)info);
671 init_waitqueue_head(&info->empty_wait);
672 tasklet_init(&info->tlet, raw3215_wakeup, (unsigned long)info);
673 tty_port_init(&info->port);
674
675 return info;
676}
677
678static void raw3215_free_info(struct raw3215_info *raw)
679{
680 kfree(raw->inbuf);
681 kfree(raw->buffer);
682 tty_port_destroy(&raw->port);
683 kfree(raw);
684}
685
686static int raw3215_probe (struct ccw_device *cdev) 648static int raw3215_probe (struct ccw_device *cdev)
687{ 649{
688 struct raw3215_info *raw; 650 struct raw3215_info *raw;
@@ -691,15 +653,11 @@ static int raw3215_probe (struct ccw_device *cdev)
691 /* Console is special. */ 653 /* Console is special. */
692 if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev))) 654 if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev)))
693 return 0; 655 return 0;
694 656 raw = kmalloc(sizeof(struct raw3215_info) +
695 raw = raw3215_alloc_info(); 657 RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA);
696 if (raw == NULL) 658 if (raw == NULL)
697 return -ENOMEM; 659 return -ENOMEM;
698 660
699 raw->cdev = cdev;
700 dev_set_drvdata(&cdev->dev, raw);
701 cdev->handler = raw3215_irq;
702
703 spin_lock(&raw3215_device_lock); 661 spin_lock(&raw3215_device_lock);
704 for (line = 0; line < NR_3215; line++) { 662 for (line = 0; line < NR_3215; line++) {
705 if (!raw3215[line]) { 663 if (!raw3215[line]) {
@@ -709,29 +667,40 @@ static int raw3215_probe (struct ccw_device *cdev)
709 } 667 }
710 spin_unlock(&raw3215_device_lock); 668 spin_unlock(&raw3215_device_lock);
711 if (line == NR_3215) { 669 if (line == NR_3215) {
712 raw3215_free_info(raw); 670 kfree(raw);
713 return -ENODEV; 671 return -ENODEV;
714 } 672 }
715 673
674 raw->cdev = cdev;
675 raw->inbuf = (char *) raw + sizeof(struct raw3215_info);
676 memset(raw, 0, sizeof(struct raw3215_info));
677 raw->buffer = kmalloc(RAW3215_BUFFER_SIZE,
678 GFP_KERNEL|GFP_DMA);
679 if (raw->buffer == NULL) {
680 spin_lock(&raw3215_device_lock);
681 raw3215[line] = NULL;
682 spin_unlock(&raw3215_device_lock);
683 kfree(raw);
684 return -ENOMEM;
685 }
686 init_waitqueue_head(&raw->empty_wait);
687
688 dev_set_drvdata(&cdev->dev, raw);
689 cdev->handler = raw3215_irq;
690
716 return 0; 691 return 0;
717} 692}
718 693
719static void raw3215_remove (struct ccw_device *cdev) 694static void raw3215_remove (struct ccw_device *cdev)
720{ 695{
721 struct raw3215_info *raw; 696 struct raw3215_info *raw;
722 unsigned int line;
723 697
724 ccw_device_set_offline(cdev); 698 ccw_device_set_offline(cdev);
725 raw = dev_get_drvdata(&cdev->dev); 699 raw = dev_get_drvdata(&cdev->dev);
726 if (raw) { 700 if (raw) {
727 spin_lock(&raw3215_device_lock);
728 for (line = 0; line < NR_3215; line++)
729 if (raw3215[line] == raw)
730 break;
731 raw3215[line] = NULL;
732 spin_unlock(&raw3215_device_lock);
733 dev_set_drvdata(&cdev->dev, NULL); 701 dev_set_drvdata(&cdev->dev, NULL);
734 raw3215_free_info(raw); 702 kfree(raw->buffer);
703 kfree(raw);
735 } 704 }
736} 705}
737 706
@@ -768,7 +737,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
768 raw = dev_get_drvdata(&cdev->dev); 737 raw = dev_get_drvdata(&cdev->dev);
769 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 738 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
770 raw3215_make_room(raw, RAW3215_BUFFER_SIZE); 739 raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
771 raw->port.flags |= ASYNC_SUSPENDED; 740 raw->flags |= RAW3215_FROZEN;
772 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 741 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
773 return 0; 742 return 0;
774} 743}
@@ -781,7 +750,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
781 /* Allow I/O again and flush output buffer. */ 750 /* Allow I/O again and flush output buffer. */
782 raw = dev_get_drvdata(&cdev->dev); 751 raw = dev_get_drvdata(&cdev->dev);
783 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 752 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
784 raw->port.flags &= ~ASYNC_SUSPENDED; 753 raw->flags &= ~RAW3215_FROZEN;
785 raw->flags |= RAW3215_FLUSHING; 754 raw->flags |= RAW3215_FLUSHING;
786 raw3215_try_io(raw); 755 raw3215_try_io(raw);
787 raw->flags &= ~RAW3215_FLUSHING; 756 raw->flags &= ~RAW3215_FLUSHING;
@@ -807,7 +776,6 @@ static struct ccw_driver raw3215_ccw_driver = {
807 .freeze = &raw3215_pm_stop, 776 .freeze = &raw3215_pm_stop,
808 .thaw = &raw3215_pm_start, 777 .thaw = &raw3215_pm_start,
809 .restore = &raw3215_pm_start, 778 .restore = &raw3215_pm_start,
810 .int_class = IRQIO_C15,
811}; 779};
812 780
813#ifdef CONFIG_TN3215_CONSOLE 781#ifdef CONFIG_TN3215_CONSOLE
@@ -854,7 +822,7 @@ static void con3215_flush(void)
854 unsigned long flags; 822 unsigned long flags;
855 823
856 raw = raw3215[0]; /* console 3215 is the first one */ 824 raw = raw3215[0]; /* console 3215 is the first one */
857 if (raw->port.flags & ASYNC_SUSPENDED) 825 if (raw->flags & RAW3215_FROZEN)
858 /* The console is still frozen for suspend. */ 826 /* The console is still frozen for suspend. */
859 if (ccw_device_force_console()) 827 if (ccw_device_force_console())
860 /* Forcing didn't work, no panic message .. */ 828 /* Forcing didn't work, no panic message .. */
@@ -924,16 +892,22 @@ static int __init con3215_init(void)
924 if (IS_ERR(cdev)) 892 if (IS_ERR(cdev))
925 return -ENODEV; 893 return -ENODEV;
926 894
927 raw3215[0] = raw = raw3215_alloc_info(); 895 raw3215[0] = raw = (struct raw3215_info *)
896 kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
897 raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
898 raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
928 raw->cdev = cdev; 899 raw->cdev = cdev;
929 dev_set_drvdata(&cdev->dev, raw); 900 dev_set_drvdata(&cdev->dev, raw);
930 cdev->handler = raw3215_irq; 901 cdev->handler = raw3215_irq;
931 902
932 raw->flags |= RAW3215_FIXED; 903 raw->flags |= RAW3215_FIXED;
904 init_waitqueue_head(&raw->empty_wait);
933 905
934 /* Request the console irq */ 906 /* Request the console irq */
935 if (raw3215_startup(raw) != 0) { 907 if (raw3215_startup(raw) != 0) {
936 raw3215_free_info(raw); 908 kfree(raw->inbuf);
909 kfree(raw->buffer);
910 kfree(raw);
937 raw3215[0] = NULL; 911 raw3215[0] = NULL;
938 return -ENODEV; 912 return -ENODEV;
939 } 913 }
@@ -945,19 +919,6 @@ static int __init con3215_init(void)
945console_initcall(con3215_init); 919console_initcall(con3215_init);
946#endif 920#endif
947 921
948static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
949{
950 struct raw3215_info *raw;
951
952 raw = raw3215[tty->index];
953 if (raw == NULL)
954 return -ENODEV;
955
956 tty->driver_data = raw;
957
958 return tty_port_install(&raw->port, driver, tty);
959}
960
961/* 922/*
962 * tty3215_open 923 * tty3215_open
963 * 924 *
@@ -965,10 +926,19 @@ static int tty3215_install(struct tty_driver *driver, struct tty_struct *tty)
965 */ 926 */
966static int tty3215_open(struct tty_struct *tty, struct file * filp) 927static int tty3215_open(struct tty_struct *tty, struct file * filp)
967{ 928{
968 struct raw3215_info *raw = tty->driver_data; 929 struct raw3215_info *raw;
969 int retval; 930 int retval, line;
931
932 line = tty->index;
933 if ((line < 0) || (line >= NR_3215))
934 return -ENODEV;
970 935
971 tty_port_tty_set(&raw->port, tty); 936 raw = raw3215[line];
937 if (raw == NULL)
938 return -ENODEV;
939
940 tty->driver_data = raw;
941 raw->tty = tty;
972 942
973 tty->low_latency = 0; /* don't use bottom half for pushing chars */ 943 tty->low_latency = 0; /* don't use bottom half for pushing chars */
974 /* 944 /*
@@ -997,9 +967,8 @@ static void tty3215_close(struct tty_struct *tty, struct file * filp)
997 tty->closing = 1; 967 tty->closing = 1;
998 /* Shutdown the terminal */ 968 /* Shutdown the terminal */
999 raw3215_shutdown(raw); 969 raw3215_shutdown(raw);
1000 tasklet_kill(&raw->tlet);
1001 tty->closing = 0; 970 tty->closing = 0;
1002 tty_port_tty_set(&raw->port, NULL); 971 raw->tty = NULL;
1003} 972}
1004 973
1005/* 974/*
@@ -1128,7 +1097,6 @@ static void tty3215_start(struct tty_struct *tty)
1128} 1097}
1129 1098
1130static const struct tty_operations tty3215_ops = { 1099static const struct tty_operations tty3215_ops = {
1131 .install = tty3215_install,
1132 .open = tty3215_open, 1100 .open = tty3215_open,
1133 .close = tty3215_close, 1101 .close = tty3215_close,
1134 .write = tty3215_write, 1102 .write = tty3215_write,
@@ -1170,6 +1138,7 @@ static int __init tty3215_init(void)
1170 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc 1138 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
1171 */ 1139 */
1172 1140
1141 driver->owner = THIS_MODULE;
1173 driver->driver_name = "tty3215"; 1142 driver->driver_name = "tty3215";
1174 driver->name = "ttyS"; 1143 driver->name = "ttyS";
1175 driver->major = TTY_MAJOR; 1144 driver->major = TTY_MAJOR;
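
Much of the con3215.c diff above toggles between a bare raw->tty pointer and the tty_port API. The port-based variant is the reference-safe one: tty_port_tty_get() returns a counted reference (or NULL) and tty_kref_put() releases it, so the tty cannot vanish mid-use. A minimal sketch (wake_readers is a hypothetical helper):

#include <linux/tty.h>

static void wake_readers(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);	/* may be NULL */

	if (tty) {
		tty_wakeup(tty);
		tty_kref_put(tty);	/* balance the get */
	}
}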
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 699fd3e363d..bb07577e8fd 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -35,6 +35,7 @@ static struct raw3270_fn con3270_fn;
35 */ 35 */
36struct con3270 { 36struct con3270 {
37 struct raw3270_view view; 37 struct raw3270_view view;
38 spinlock_t lock;
38 struct list_head freemem; /* list of free memory for strings. */ 39 struct list_head freemem; /* list of free memory for strings. */
39 40
40 /* Output stuff. */ 41 /* Output stuff. */
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 8de2deb176d..0e9a309b966 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/ctrlchar.c
2 * Unified handling of special chars. 3 * Unified handling of special chars.
3 * 4 *
4 * Copyright IBM Corp. 2001 5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> 6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
6 * 7 *
7 */ 8 */
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 1a53552f498..935ffa0ea7c 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/ctrlchar.c
2 * Unified handling of special chars. 3 * Unified handling of special chars.
3 * 4 *
4 * Copyright IBM Corp. 2001 5 * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com> 6 * Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
6 * 7 *
7 */ 8 */
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 911704571b9..f6489eb7e97 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -11,8 +11,6 @@
11#include <linux/console.h> 11#include <linux/console.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/compat.h>
15#include <linux/module.h>
16#include <linux/list.h> 14#include <linux/list.h>
17#include <linux/slab.h> 15#include <linux/slab.h>
18#include <linux/types.h> 16#include <linux/types.h>
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 01463b052ae..80658819248 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/keyboard.c
2 * ebcdic keycode functions for s390 console drivers 3 * ebcdic keycode functions for s390 console drivers
3 * 4 *
4 * S390 version 5 * S390 version
5 * Copyright IBM Corp. 2003 6 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 7 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 */ 8 */
8 9
@@ -198,7 +199,7 @@ handle_diacr(struct kbd_data *kbd, unsigned int ch)
198 if (ch == ' ' || ch == d) 199 if (ch == ' ' || ch == d)
199 return d; 200 return d;
200 201
201 kbd_put_queue(kbd->port, d); 202 kbd_put_queue(kbd->tty, d);
202 return ch; 203 return ch;
203} 204}
204 205
@@ -220,7 +221,7 @@ k_self(struct kbd_data *kbd, unsigned char value)
220{ 221{
221 if (kbd->diacr) 222 if (kbd->diacr)
222 value = handle_diacr(kbd, value); 223 value = handle_diacr(kbd, value);
223 kbd_put_queue(kbd->port, value); 224 kbd_put_queue(kbd->tty, value);
224} 225}
225 226
226/* 227/*
@@ -238,7 +239,7 @@ static void
238k_fn(struct kbd_data *kbd, unsigned char value) 239k_fn(struct kbd_data *kbd, unsigned char value)
239{ 240{
240 if (kbd->func_table[value]) 241 if (kbd->func_table[value])
241 kbd_puts_queue(kbd->port, kbd->func_table[value]); 242 kbd_puts_queue(kbd->tty, kbd->func_table[value]);
242} 243}
243 244
244static void 245static void
@@ -256,20 +257,20 @@ k_spec(struct kbd_data *kbd, unsigned char value)
256 * but we need only 16 bits here 257 * but we need only 16 bits here
257 */ 258 */
258static void 259static void
259to_utf8(struct tty_port *port, ushort c) 260to_utf8(struct tty_struct *tty, ushort c)
260{ 261{
261 if (c < 0x80) 262 if (c < 0x80)
262 /* 0******* */ 263 /* 0******* */
263 kbd_put_queue(port, c); 264 kbd_put_queue(tty, c);
264 else if (c < 0x800) { 265 else if (c < 0x800) {
265 /* 110***** 10****** */ 266 /* 110***** 10****** */
266 kbd_put_queue(port, 0xc0 | (c >> 6)); 267 kbd_put_queue(tty, 0xc0 | (c >> 6));
267 kbd_put_queue(port, 0x80 | (c & 0x3f)); 268 kbd_put_queue(tty, 0x80 | (c & 0x3f));
268 } else { 269 } else {
269 /* 1110**** 10****** 10****** */ 270 /* 1110**** 10****** 10****** */
270 kbd_put_queue(port, 0xe0 | (c >> 12)); 271 kbd_put_queue(tty, 0xe0 | (c >> 12));
271 kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f)); 272 kbd_put_queue(tty, 0x80 | ((c >> 6) & 0x3f));
272 kbd_put_queue(port, 0x80 | (c & 0x3f)); 273 kbd_put_queue(tty, 0x80 | (c & 0x3f));
273 } 274 }
274} 275}
275 276
@@ -282,7 +283,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
282 unsigned short keysym; 283 unsigned short keysym;
283 unsigned char type, value; 284 unsigned char type, value;
284 285
285 if (!kbd) 286 if (!kbd || !kbd->tty)
286 return; 287 return;
287 288
288 if (keycode >= 384) 289 if (keycode >= 384)
@@ -322,7 +323,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
322#endif 323#endif
323 (*k_handler[type])(kbd, value); 324 (*k_handler[type])(kbd, value);
324 } else 325 } else
325 to_utf8(kbd->port, keysym); 326 to_utf8(kbd->tty, keysym);
326} 327}
327 328
328/* 329/*
@@ -456,7 +457,6 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
456 457
457int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg) 458int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
458{ 459{
459 struct tty_struct *tty;
460 void __user *argp; 460 void __user *argp;
461 unsigned int ct; 461 unsigned int ct;
462 int perm; 462 int perm;
@@ -467,10 +467,7 @@ int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
467 * To have permissions to do most of the vt ioctls, we either have 467 * To have permissions to do most of the vt ioctls, we either have
468 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG. 468 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
469 */ 469 */
470 tty = tty_port_tty_get(kbd->port); 470 perm = current->signal->tty == kbd->tty || capable(CAP_SYS_TTY_CONFIG);
471 /* FIXME this test is pretty racy */
472 perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
473 tty_kref_put(tty);
474 switch (cmd) { 471 switch (cmd) {
475 case KDGKBTYPE: 472 case KDGKBTYPE:
476 return put_user(KB_101, (char __user *)argp); 473 return put_user(KB_101, (char __user *)argp);
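to_utf8() above hand-rolls the 1-, 2- and 3-byte UTF-8 encodings for a 16-bit keysym. The same arithmetic as a standalone, checkable sketch in plain C:

	#include <stdio.h>

	/* encode a 16-bit code point into buf, return number of bytes (1..3) */
	static int to_utf8(unsigned short c, unsigned char *buf)
	{
		if (c < 0x80) {				/* 0xxxxxxx */
			buf[0] = c;
			return 1;
		}
		if (c < 0x800) {			/* 110xxxxx 10xxxxxx */
			buf[0] = 0xc0 | (c >> 6);
			buf[1] = 0x80 | (c & 0x3f);
			return 2;
		}
		buf[0] = 0xe0 | (c >> 12);		/* 1110xxxx 10xxxxxx 10xxxxxx */
		buf[1] = 0x80 | ((c >> 6) & 0x3f);
		buf[2] = 0x80 | (c & 0x3f);
		return 3;
	}

	int main(void)
	{
		unsigned char buf[3];
		int i, n = to_utf8(0x20ac, buf);	/* U+20AC, the euro sign */

		for (i = 0; i < n; i++)
			printf("%02x ", buf[i]);	/* prints: e2 82 ac */
		printf("\n");
		return 0;
	}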
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index d0ae2be5819..7e736aaeae6 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/keyboard.h
2 * ebcdic keycode functions for s390 console drivers 3 * ebcdic keycode functions for s390 console drivers
3 * 4 *
4 * Copyright IBM Corp. 2003 5 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 */ 7 */
7 8
@@ -20,7 +21,7 @@ typedef void (fn_handler_fn)(struct kbd_data *);
20 */ 21 */
21 22
22struct kbd_data { 23struct kbd_data {
23 struct tty_port *port; 24 struct tty_struct *tty;
24 unsigned short **key_maps; 25 unsigned short **key_maps;
25 char **func_table; 26 char **func_table;
26 fn_handler_fn **fn_handler; 27 fn_handler_fn **fn_handler;
@@ -41,24 +42,16 @@ int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);
41 * Helper Functions. 42 * Helper Functions.
42 */ 43 */
43static inline void 44static inline void
44kbd_put_queue(struct tty_port *port, int ch) 45kbd_put_queue(struct tty_struct *tty, int ch)
45{ 46{
46 struct tty_struct *tty = tty_port_tty_get(port);
47 if (!tty)
48 return;
49 tty_insert_flip_char(tty, ch, 0); 47 tty_insert_flip_char(tty, ch, 0);
50 tty_schedule_flip(tty); 48 tty_schedule_flip(tty);
51 tty_kref_put(tty);
52} 49}
53 50
54static inline void 51static inline void
55kbd_puts_queue(struct tty_port *port, char *cp) 52kbd_puts_queue(struct tty_struct *tty, char *cp)
56{ 53{
57 struct tty_struct *tty = tty_port_tty_get(port);
58 if (!tty)
59 return;
60 while (*cp) 54 while (*cp)
61 tty_insert_flip_char(tty, *cp++, 0); 55 tty_insert_flip_char(tty, *cp++, 0);
62 tty_schedule_flip(tty); 56 tty_schedule_flip(tty);
63 tty_kref_put(tty);
64} 57}
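The left-hand kbd_put_queue()/kbd_puts_queue() take a tty_port and must cope with the tty having vanished; the right-hand variants assume the caller already holds a valid tty_struct. A sketch of the guarded form, using the flip-buffer calls of this kernel generation (where they still take a tty_struct) and an illustrative function name:

	#include <linux/tty.h>
	#include <linux/tty_flip.h>

	static void queue_string(struct tty_port *port, const char *s)
	{
		struct tty_struct *tty = tty_port_tty_get(port);	/* NULL if closed */

		if (!tty)
			return;			/* nobody has the line open: drop input */
		while (*s)
			tty_insert_flip_char(tty, *s++, TTY_NORMAL);
		tty_schedule_flip(tty);		/* hand off to the line discipline */
		tty_kref_put(tty);		/* balance tty_port_tty_get() */
	}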
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index f4ff515db25..5b8b8592d31 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -571,11 +571,8 @@ static int __init mon_init(void)
571 if (rc) 571 if (rc)
572 goto out_iucv; 572 goto out_iucv;
573 monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); 573 monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
574 if (!monreader_device) { 574 if (!monreader_device)
575 rc = -ENOMEM;
576 goto out_driver; 575 goto out_driver;
577 }
578
579 dev_set_name(monreader_device, "monreader-dev"); 576 dev_set_name(monreader_device, "monreader-dev");
580 monreader_device->bus = &iucv_bus; 577 monreader_device->bus = &iucv_bus;
581 monreader_device->parent = iucv_root; 578 monreader_device->parent = iucv_root;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 9a6c140c5f0..810ac38631c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,6 +7,7 @@
7 * Copyright IBM Corp. 2003, 2009 7 * Copyright IBM Corp. 2003, 2009
8 */ 8 */
9 9
10#include <linux/kernel_stat.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/err.h> 12#include <linux/err.h>
12#include <linux/init.h> 13#include <linux/init.h>
@@ -75,7 +76,7 @@ static LIST_HEAD(raw3270_devices);
75static int raw3270_registered; 76static int raw3270_registered;
76 77
77/* Module parameters */ 78/* Module parameters */
78static bool tubxcorrect = 0; 79static int tubxcorrect = 0;
79module_param(tubxcorrect, bool, 0); 80module_param(tubxcorrect, bool, 0);
80 81
81/* 82/*
@@ -329,6 +330,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
329 struct raw3270_request *rq; 330 struct raw3270_request *rq;
330 int rc; 331 int rc;
331 332
333 kstat_cpu(smp_processor_id()).irqs[IOINT_C70]++;
332 rp = dev_get_drvdata(&cdev->dev); 334 rp = dev_get_drvdata(&cdev->dev);
333 if (!rp) 335 if (!rp)
334 return; 336 return;
@@ -1396,7 +1398,6 @@ static struct ccw_driver raw3270_ccw_driver = {
1396 .freeze = &raw3270_pm_stop, 1398 .freeze = &raw3270_pm_stop,
1397 .thaw = &raw3270_pm_start, 1399 .thaw = &raw3270_pm_start,
1398 .restore = &raw3270_pm_start, 1400 .restore = &raw3270_pm_start,
1399 .int_class = IRQIO_C70,
1400}; 1401};
1401 1402
1402static int 1403static int
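The raw3270 hunk flips tubxcorrect between int and bool while module_param() keeps the bool parameter type; later kernels enforce that the declared C type matches the parameter type, which is what the bool declaration satisfies. Sketch of the matched form (the description string is illustrative, not from the driver):

	#include <linux/module.h>

	static bool tubxcorrect;		/* false unless tubxcorrect=1 is given */
	module_param(tubxcorrect, bool, 0);	/* param type must match the C type */
	MODULE_PARM_DESC(tubxcorrect, "translation correction for 3270 terminals");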
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 12c16a65dd2..eaa7e78186f 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -334,7 +334,7 @@ sclp_dispatch_evbufs(struct sccb_header *sccb)
334 reg->receiver_fn(evbuf); 334 reg->receiver_fn(evbuf);
335 spin_lock_irqsave(&sclp_lock, flags); 335 spin_lock_irqsave(&sclp_lock, flags);
336 } else if (reg == NULL) 336 } else if (reg == NULL)
337 rc = -EOPNOTSUPP; 337 rc = -ENOSYS;
338 } 338 }
339 spin_unlock_irqrestore(&sclp_lock, flags); 339 spin_unlock_irqrestore(&sclp_lock, flags);
340 return rc; 340 return rc;
@@ -393,14 +393,14 @@ __sclp_find_req(u32 sccb)
393/* Handler for external interruption. Perform request post-processing. 393/* Handler for external interruption. Perform request post-processing.
394 * Prepare read event data request if necessary. Start processing of next 394 * Prepare read event data request if necessary. Start processing of next
395 * request on queue. */ 395 * request on queue. */
396static void sclp_interrupt_handler(struct ext_code ext_code, 396static void sclp_interrupt_handler(unsigned int ext_int_code,
397 unsigned int param32, unsigned long param64) 397 unsigned int param32, unsigned long param64)
398{ 398{
399 struct sclp_req *req; 399 struct sclp_req *req;
400 u32 finished_sccb; 400 u32 finished_sccb;
401 u32 evbuf_pending; 401 u32 evbuf_pending;
402 402
403 inc_irq_stat(IRQEXT_SCP); 403 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
404 spin_lock(&sclp_lock); 404 spin_lock(&sclp_lock);
405 finished_sccb = param32 & 0xfffffff8; 405 finished_sccb = param32 & 0xfffffff8;
406 evbuf_pending = param32 & 0x3; 406 evbuf_pending = param32 & 0x3;
@@ -654,6 +654,16 @@ sclp_remove_processed(struct sccb_header *sccb)
654 654
655EXPORT_SYMBOL(sclp_remove_processed); 655EXPORT_SYMBOL(sclp_remove_processed);
656 656
657struct init_sccb {
658 struct sccb_header header;
659 u16 _reserved;
660 u16 mask_length;
661 sccb_mask_t receive_mask;
662 sccb_mask_t send_mask;
663 sccb_mask_t sclp_receive_mask;
664 sccb_mask_t sclp_send_mask;
665} __attribute__((packed));
666
657/* Prepare init mask request. Called while sclp_lock is locked. */ 667/* Prepare init mask request. Called while sclp_lock is locked. */
658static inline void 668static inline void
659__sclp_make_init_req(u32 receive_mask, u32 send_mask) 669__sclp_make_init_req(u32 receive_mask, u32 send_mask)
@@ -808,12 +818,12 @@ EXPORT_SYMBOL(sclp_reactivate);
808 818
809/* Handler for external interruption used during initialization. Modify 819/* Handler for external interruption used during initialization. Modify
810 * request state to done. */ 820 * request state to done. */
811static void sclp_check_handler(struct ext_code ext_code, 821static void sclp_check_handler(unsigned int ext_int_code,
812 unsigned int param32, unsigned long param64) 822 unsigned int param32, unsigned long param64)
813{ 823{
814 u32 finished_sccb; 824 u32 finished_sccb;
815 825
816 inc_irq_stat(IRQEXT_SCP); 826 kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
817 finished_sccb = param32 & 0xfffffff8; 827 finished_sccb = param32 & 0xfffffff8;
818 /* Is this the interrupt we are waiting for? */ 828 /* Is this the interrupt we are waiting for? */
819 if (finished_sccb == 0) 829 if (finished_sccb == 0)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 25bcd4c0ed8..49a1bb52bc8 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999,2012 2 * Copyright IBM Corp. 1999, 2009
3 * 3 *
4 * Author(s): Martin Peschke <mpeschke@de.ibm.com> 4 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -88,22 +88,11 @@ struct sccb_header {
88 u16 response_code; 88 u16 response_code;
89} __attribute__((packed)); 89} __attribute__((packed));
90 90
91struct init_sccb {
92 struct sccb_header header;
93 u16 _reserved;
94 u16 mask_length;
95 sccb_mask_t receive_mask;
96 sccb_mask_t send_mask;
97 sccb_mask_t sclp_receive_mask;
98 sccb_mask_t sclp_send_mask;
99} __attribute__((packed));
100
101extern u64 sclp_facilities; 91extern u64 sclp_facilities;
102#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL) 92#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
103#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 93#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
104#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) 94#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
105#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) 95#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
106#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
107 96
108 97
109struct gds_subvector { 98struct gds_subvector {
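sclp_facilities is a 64-bit word that the SCLP_HAS_* macros above test one bit at a time, with bit 0 as the most significant bit (z/Architecture numbering). A userspace-checkable sketch of the same masking, with an assumed sample value:

	#include <stdint.h>
	#include <stdio.h>

	#define HAS_CHP_INFO(f)	(((f) & 0x8000000000000000ULL) != 0)	/* bit 0 */
	#define HAS_CPU_INFO(f)	(((f) & 0x0800000000000000ULL) != 0)	/* bit 4 */

	int main(void)
	{
		uint64_t fac = 0x8800000000000000ULL;	/* assumed sample value */

		printf("chp info: %d, cpu info: %d\n",
		       HAS_CHP_INFO(fac), HAS_CPU_INFO(fac));	/* 1, 1 */
		return 0;
	}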
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index c44d13f607b..837e010299a 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007,2012 2 * Copyright IBM Corp. 2007, 2009
3 * 3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -12,19 +12,15 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/export.h>
16#include <linux/slab.h> 15#include <linux/slab.h>
17#include <linux/string.h> 16#include <linux/string.h>
18#include <linux/mm.h> 17#include <linux/mm.h>
19#include <linux/mmzone.h> 18#include <linux/mmzone.h>
20#include <linux/memory.h> 19#include <linux/memory.h>
21#include <linux/module.h>
22#include <linux/platform_device.h> 20#include <linux/platform_device.h>
23#include <asm/ctl_reg.h>
24#include <asm/chpid.h> 21#include <asm/chpid.h>
25#include <asm/setup.h>
26#include <asm/page.h>
27#include <asm/sclp.h> 22#include <asm/sclp.h>
23#include <asm/setup.h>
28 24
29#include "sclp.h" 25#include "sclp.h"
30 26
@@ -41,8 +37,7 @@ struct read_info_sccb {
41 u64 facilities; /* 48-55 */ 37 u64 facilities; /* 48-55 */
42 u8 _reserved2[84 - 56]; /* 56-83 */ 38 u8 _reserved2[84 - 56]; /* 56-83 */
43 u8 fac84; /* 84 */ 39 u8 fac84; /* 84 */
44 u8 fac85; /* 85 */ 40 u8 _reserved3[91 - 85]; /* 85-90 */
45 u8 _reserved3[91 - 86]; /* 86-90 */
46 u8 flags; /* 91 */ 41 u8 flags; /* 91 */
47 u8 _reserved4[100 - 92]; /* 92-99 */ 42 u8 _reserved4[100 - 92]; /* 92-99 */
48 u32 rnsize2; /* 100-103 */ 43 u32 rnsize2; /* 100-103 */
@@ -50,13 +45,11 @@ struct read_info_sccb {
50 u8 _reserved5[4096 - 112]; /* 112-4095 */ 45 u8 _reserved5[4096 - 112]; /* 112-4095 */
51} __attribute__((packed, aligned(PAGE_SIZE))); 46} __attribute__((packed, aligned(PAGE_SIZE)));
52 47
53static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
54static struct read_info_sccb __initdata early_read_info_sccb; 48static struct read_info_sccb __initdata early_read_info_sccb;
55static int __initdata early_read_info_sccb_valid; 49static int __initdata early_read_info_sccb_valid;
56 50
57u64 sclp_facilities; 51u64 sclp_facilities;
58static u8 sclp_fac84; 52static u8 sclp_fac84;
59static u8 sclp_fac85;
60static unsigned long long rzm; 53static unsigned long long rzm;
61static unsigned long long rnmax; 54static unsigned long long rnmax;
62 55
@@ -68,8 +61,8 @@ static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
68 rc = sclp_service_call(cmd, sccb); 61 rc = sclp_service_call(cmd, sccb);
69 if (rc) 62 if (rc)
70 goto out; 63 goto out;
71 __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | 64 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
72 PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT); 65 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
73 local_irq_disable(); 66 local_irq_disable();
74out: 67out:
75 /* Contents of the sccb might have changed. */ 68 /* Contents of the sccb might have changed. */
@@ -107,19 +100,6 @@ static void __init sclp_read_info_early(void)
107 } 100 }
108} 101}
109 102
110static void __init sclp_event_mask_early(void)
111{
112 struct init_sccb *sccb = &early_event_mask_sccb;
113 int rc;
114
115 do {
116 memset(sccb, 0, sizeof(*sccb));
117 sccb->header.length = sizeof(*sccb);
118 sccb->mask_length = sizeof(sccb_mask_t);
119 rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
120 } while (rc == -EBUSY);
121}
122
123void __init sclp_facilities_detect(void) 103void __init sclp_facilities_detect(void)
124{ 104{
125 struct read_info_sccb *sccb; 105 struct read_info_sccb *sccb;
@@ -131,34 +111,9 @@ void __init sclp_facilities_detect(void)
131 sccb = &early_read_info_sccb; 111 sccb = &early_read_info_sccb;
132 sclp_facilities = sccb->facilities; 112 sclp_facilities = sccb->facilities;
133 sclp_fac84 = sccb->fac84; 113 sclp_fac84 = sccb->fac84;
134 sclp_fac85 = sccb->fac85;
135 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 114 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
136 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 115 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
137 rzm <<= 20; 116 rzm <<= 20;
138
139 sclp_event_mask_early();
140}
141
142bool __init sclp_has_linemode(void)
143{
144 struct init_sccb *sccb = &early_event_mask_sccb;
145
146 if (sccb->header.response_code != 0x20)
147 return 0;
148 if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
149 return 1;
150 return 0;
151}
152
153bool __init sclp_has_vt220(void)
154{
155 struct init_sccb *sccb = &early_event_mask_sccb;
156
157 if (sccb->header.response_code != 0x20)
158 return 0;
159 if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
160 return 1;
161 return 0;
162} 117}
163 118
164unsigned long long sclp_get_rnmax(void) 119unsigned long long sclp_get_rnmax(void)
@@ -171,12 +126,6 @@ unsigned long long sclp_get_rzm(void)
171 return rzm; 126 return rzm;
172} 127}
173 128
174u8 sclp_get_fac85(void)
175{
176 return sclp_fac85;
177}
178EXPORT_SYMBOL_GPL(sclp_get_fac85);
179
180/* 129/*
181 * This function will be called after sclp_facilities_detect(), which gets 130 * This function will be called after sclp_facilities_detect(), which gets
182 * called from early.c code. Therefore the sccb should have valid contents. 131 * called from early.c code. Therefore the sccb should have valid contents.
@@ -402,15 +351,7 @@ out:
402 351
403static int sclp_assign_storage(u16 rn) 352static int sclp_assign_storage(u16 rn)
404{ 353{
405 unsigned long long start; 354 return do_assign_storage(0x000d0001, rn);
406 int rc;
407
408 rc = do_assign_storage(0x000d0001, rn);
409 if (rc)
410 return rc;
411 start = rn2addr(rn);
412 storage_key_init_range(start, start + rzm);
413 return 0;
414} 355}
415 356
416static int sclp_unassign_storage(u16 rn) 357static int sclp_unassign_storage(u16 rn)
@@ -500,8 +441,9 @@ static int sclp_mem_notifier(struct notifier_block *nb,
500 start = arg->start_pfn << PAGE_SHIFT; 441 start = arg->start_pfn << PAGE_SHIFT;
501 size = arg->nr_pages << PAGE_SHIFT; 442 size = arg->nr_pages << PAGE_SHIFT;
502 mutex_lock(&sclp_mem_mutex); 443 mutex_lock(&sclp_mem_mutex);
503 for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1) 444 for (id = 0; id <= sclp_max_storage_id; id++)
504 sclp_attach_storage(id); 445 if (!test_bit(id, sclp_storage_ids))
446 sclp_attach_storage(id);
505 switch (action) { 447 switch (action) {
506 case MEM_ONLINE: 448 case MEM_ONLINE:
507 case MEM_GOING_OFFLINE: 449 case MEM_GOING_OFFLINE:
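The sclp_mem_notifier hunk is a straight substitution: for_each_clear_bit() from <linux/bitops.h> is shorthand for the open-coded test_bit() loop in the right-hand column. A sketch of the equivalence (function and parameter names are illustrative; sclp_attach_storage() is the driver's own helper):

	#include <linux/bitops.h>

	/* 'ids' is the bitmap, 'max' the highest valid bit number */
	static void attach_missing(unsigned long *ids, unsigned long max)
	{
		unsigned long id;

		for_each_clear_bit(id, ids, max + 1)
			sclp_attach_storage(id);

		/* behaves like the open-coded form:
		 *	for (id = 0; id <= max; id++)
		 *		if (!test_bit(id, ids))
		 *			sclp_attach_storage(id);
		 */
	}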
@@ -702,67 +644,6 @@ __initcall(sclp_detect_standby_memory);
702#endif /* CONFIG_MEMORY_HOTPLUG */ 644#endif /* CONFIG_MEMORY_HOTPLUG */
703 645
704/* 646/*
705 * PCI I/O adapter configuration related functions.
706 */
707#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
708#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
709
710#define SCLP_RECONFIG_PCI_ATPYE 2
711
712struct pci_cfg_sccb {
713 struct sccb_header header;
714 u8 atype; /* adapter type */
715 u8 reserved1;
716 u16 reserved2;
717 u32 aid; /* adapter identifier */
718} __packed;
719
720static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
721{
722 struct pci_cfg_sccb *sccb;
723 int rc;
724
725 if (!SCLP_HAS_PCI_RECONFIG)
726 return -EOPNOTSUPP;
727
728 sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
729 if (!sccb)
730 return -ENOMEM;
731
732 sccb->header.length = PAGE_SIZE;
733 sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
734 sccb->aid = fid;
735 rc = do_sync_request(cmd, sccb);
736 if (rc)
737 goto out;
738 switch (sccb->header.response_code) {
739 case 0x0020:
740 case 0x0120:
741 break;
742 default:
743 pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
744 cmd, sccb->header.response_code);
745 rc = -EIO;
746 break;
747 }
748out:
749 free_page((unsigned long) sccb);
750 return rc;
751}
752
753int sclp_pci_configure(u32 fid)
754{
755 return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
756}
757EXPORT_SYMBOL(sclp_pci_configure);
758
759int sclp_pci_deconfigure(u32 fid)
760{
761 return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
762}
763EXPORT_SYMBOL(sclp_pci_deconfigure);
764
765/*
766 * Channel path configuration related functions. 647 * Channel path configuration related functions.
767 */ 648 */
768 649
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 444d36183a2..95b909ac2b7 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/char/sclp_config.c
3 *
2 * Copyright IBM Corp. 2007 4 * Copyright IBM Corp. 2007
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */ 6 */
@@ -9,7 +11,7 @@
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/errno.h> 12#include <linux/errno.h>
11#include <linux/cpu.h> 13#include <linux/cpu.h>
12#include <linux/device.h> 14#include <linux/sysdev.h>
13#include <linux/workqueue.h> 15#include <linux/workqueue.h>
14#include <asm/smp.h> 16#include <asm/smp.h>
15 17
@@ -29,14 +31,14 @@ static struct work_struct sclp_cpu_change_work;
29static void sclp_cpu_capability_notify(struct work_struct *work) 31static void sclp_cpu_capability_notify(struct work_struct *work)
30{ 32{
31 int cpu; 33 int cpu;
32 struct device *dev; 34 struct sys_device *sysdev;
33 35
34 s390_adjust_jiffies(); 36 s390_adjust_jiffies();
35 pr_warning("cpu capability changed.\n"); 37 pr_warning("cpu capability changed.\n");
36 get_online_cpus(); 38 get_online_cpus();
37 for_each_online_cpu(cpu) { 39 for_each_online_cpu(cpu) {
38 dev = get_cpu_device(cpu); 40 sysdev = get_cpu_sysdev(cpu);
39 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 41 kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
40 } 42 }
41 put_online_cpus(); 43 put_online_cpus();
42} 44}
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index d70d8c20229..5716487b8c9 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/sclp_cpi.c
2 * SCLP control program identification 3 * SCLP control program identification
3 * 4 *
4 * Copyright IBM Corp. 2001, 2007 5 * Copyright IBM Corp. 2001, 2007
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 2acea809e2a..4a51e3f0968 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/sclp_cpi_sys.c
2 * SCLP control program identification sysfs interface 3 * SCLP control program identification sysfs interface
3 * 4 *
4 * Copyright IBM Corp. 2001, 2007 5 * Copyright IBM Corp. 2001, 2007
@@ -20,7 +21,6 @@
20#include <linux/err.h> 21#include <linux/err.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/completion.h> 23#include <linux/completion.h>
23#include <linux/export.h>
24#include <asm/ebcdic.h> 24#include <asm/ebcdic.h>
25#include <asm/sclp.h> 25#include <asm/sclp.h>
26 26
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
index 65bb6a99c97..deef3e6ff49 100644
--- a/drivers/s390/char/sclp_cpi_sys.h
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/sclp_cpi_sys.h
2 * SCLP control program identification sysfs interface 3 * SCLP control program identification sysfs interface
3 * 4 *
4 * Copyright IBM Corp. 2007 5 * Copyright IBM Corp. 2007
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index 2553db0fdb5..ab294d5a534 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/sclp_ocf.c
2 * SCLP OCF communication parameters sysfs interface 3 * SCLP OCF communication parameters sysfs interface
3 * 4 *
4 * Copyright IBM Corp. 2011 5 * Copyright IBM Corp. 2011
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 475e470d976..a90a02c28d6 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/sclp_quiesce.c
2 * signal quiesce handler 3 * signal quiesce handler
3 * 4 *
4 * Copyright IBM Corp. 1999, 2004 5 * (C) Copyright IBM Corp. 1999,2004
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
7 */ 8 */
@@ -14,6 +15,7 @@
14#include <linux/reboot.h> 15#include <linux/reboot.h>
15#include <linux/atomic.h> 16#include <linux/atomic.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/sigp.h>
17#include <asm/smp.h> 19#include <asm/smp.h>
18 20
19#include "sclp.h" 21#include "sclp.h"
@@ -28,8 +30,7 @@ static void do_machine_quiesce(void)
28 psw_t quiesce_psw; 30 psw_t quiesce_psw;
29 31
30 smp_send_stop(); 32 smp_send_stop();
31 quiesce_psw.mask = 33 quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
32 PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
33 quiesce_psw.addr = 0xfff; 34 quiesce_psw.addr = 0xfff;
34 __load_psw(quiesce_psw); 35 __load_psw(quiesce_psw);
35} 36}
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 3b13d58fe87..4be63be7344 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -463,7 +463,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
463 /* Use write priority message */ 463 /* Use write priority message */
464 sccb->msg_buf.header.type = EVTYP_PMSGCMD; 464 sccb->msg_buf.header.type = EVTYP_PMSGCMD;
465 else 465 else
466 return -EOPNOTSUPP; 466 return -ENOSYS;
467 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; 467 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
468 buffer->request.status = SCLP_REQ_FILLED; 468 buffer->request.status = SCLP_REQ_FILLED;
469 buffer->request.callback = sclp_writedata_callback; 469 buffer->request.callback = sclp_writedata_callback;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index b1032931a1c..fa733ecd3d7 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,14 +1,13 @@
1/* 1/*
2 * Sclp "store data in absolute storage" 2 * Sclp "store data in absolute storage"
3 * 3 *
4 * Copyright IBM Corp. 2003, 2007 4 * Copyright IBM Corp. 2003,2007
5 * Author(s): Michael Holzheu 5 * Author(s): Michael Holzheu
6 */ 6 */
7 7
8#define KMSG_COMPONENT "sclp_sdias" 8#define KMSG_COMPONENT "sclp_sdias"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10 10
11#include <linux/completion.h>
12#include <linux/sched.h> 11#include <linux/sched.h>
13#include <asm/sclp.h> 12#include <asm/sclp.h>
14#include <asm/debug.h> 13#include <asm/debug.h>
@@ -63,29 +62,15 @@ struct sdias_sccb {
63} __attribute__((packed)); 62} __attribute__((packed));
64 63
65static struct sdias_sccb sccb __attribute__((aligned(4096))); 64static struct sdias_sccb sccb __attribute__((aligned(4096)));
66static struct sdias_evbuf sdias_evbuf;
67 65
68static DECLARE_COMPLETION(evbuf_accepted); 66static int sclp_req_done;
69static DECLARE_COMPLETION(evbuf_done); 67static wait_queue_head_t sdias_wq;
70static DEFINE_MUTEX(sdias_mutex); 68static DEFINE_MUTEX(sdias_mutex);
71 69
72/*
73 * Called by SCLP base when read event data has been completed (async mode only)
74 */
75static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
76{
77 memcpy(&sdias_evbuf, evbuf,
78 min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
79 complete(&evbuf_done);
80 TRACE("sclp_sdias_receiver_fn done\n");
81}
82
83/*
84 * Called by SCLP base when sdias event has been accepted
85 */
86static void sdias_callback(struct sclp_req *request, void *data) 70static void sdias_callback(struct sclp_req *request, void *data)
87{ 71{
88 complete(&evbuf_accepted); 72 sclp_req_done = 1;
73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
89 TRACE("callback done\n"); 74 TRACE("callback done\n");
90} 75}
91 76
@@ -95,6 +80,7 @@ static int sdias_sclp_send(struct sclp_req *req)
95 int rc; 80 int rc;
96 81
97 for (retries = SDIAS_RETRIES; retries; retries--) { 82 for (retries = SDIAS_RETRIES; retries; retries--) {
83 sclp_req_done = 0;
98 TRACE("add request\n"); 84 TRACE("add request\n");
99 rc = sclp_add_request(req); 85 rc = sclp_add_request(req);
100 if (rc) { 86 if (rc) {
@@ -105,31 +91,16 @@ static int sdias_sclp_send(struct sclp_req *req)
105 continue; 91 continue;
106 } 92 }
107 /* initiated, wait for completion of service call */ 93 /* initiated, wait for completion of service call */
108 wait_for_completion(&evbuf_accepted); 94 wait_event(sdias_wq, (sclp_req_done == 1));
109 if (req->status == SCLP_REQ_FAILED) { 95 if (req->status == SCLP_REQ_FAILED) {
110 TRACE("sclp request failed\n"); 96 TRACE("sclp request failed\n");
97 rc = -EIO;
111 continue; 98 continue;
112 } 99 }
113 /* if not accepted, retry */
114 if (!(sccb.evbuf.hdr.flags & 0x80)) {
115 TRACE("sclp request failed: flags=%x\n",
116 sccb.evbuf.hdr.flags);
117 continue;
118 }
119 /*
120 * for the sync interface the response is in the initial sccb
121 */
122 if (!sclp_sdias_register.receiver_fn) {
123 memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
124 TRACE("sync request done\n");
125 return 0;
126 }
127 /* otherwise we wait for completion */
128 wait_for_completion(&evbuf_done);
129 TRACE("request done\n"); 100 TRACE("request done\n");
130 return 0; 101 break;
131 } 102 }
132 return -EIO; 103 return rc;
133} 104}
134 105
135/* 106/*
@@ -169,12 +140,13 @@ int sclp_sdias_blk_count(void)
169 goto out; 140 goto out;
170 } 141 }
171 142
172 switch (sdias_evbuf.event_status) { 143 switch (sccb.evbuf.event_status) {
173 case 0: 144 case 0:
174 rc = sdias_evbuf.blk_cnt; 145 rc = sccb.evbuf.blk_cnt;
175 break; 146 break;
176 default: 147 default:
177 pr_err("SCLP error: %x\n", sdias_evbuf.event_status); 148 pr_err("SCLP error: %x\n",
149 sccb.evbuf.event_status);
178 rc = -EIO; 150 rc = -EIO;
179 goto out; 151 goto out;
180 } 152 }
@@ -211,7 +183,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
211 sccb.evbuf.event_qual = EQ_STORE_DATA; 183 sccb.evbuf.event_qual = EQ_STORE_DATA;
212 sccb.evbuf.data_id = DI_FCP_DUMP; 184 sccb.evbuf.data_id = DI_FCP_DUMP;
213 sccb.evbuf.event_id = 4712; 185 sccb.evbuf.event_id = 4712;
214#ifdef CONFIG_64BIT 186#ifdef __s390x__
215 sccb.evbuf.asa_size = ASA_SIZE_64; 187 sccb.evbuf.asa_size = ASA_SIZE_64;
216#else 188#else
217 sccb.evbuf.asa_size = ASA_SIZE_32; 189 sccb.evbuf.asa_size = ASA_SIZE_32;
@@ -239,20 +211,18 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
239 goto out; 211 goto out;
240 } 212 }
241 213
242 switch (sdias_evbuf.event_status) { 214 switch (sccb.evbuf.event_status) {
243 case EVSTATE_ALL_STORED: 215 case EVSTATE_ALL_STORED:
244 TRACE("all stored\n"); 216 TRACE("all stored\n");
245 break;
246 case EVSTATE_PART_STORED: 217 case EVSTATE_PART_STORED:
247 TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); 218 TRACE("part stored: %i\n", sccb.evbuf.blk_cnt);
248 break; 219 break;
249 case EVSTATE_NO_DATA: 220 case EVSTATE_NO_DATA:
250 TRACE("no data\n"); 221 TRACE("no data\n");
251 /* fall through */
252 default: 222 default:
253 pr_err("Error from SCLP while copying hsa. " 223 pr_err("Error from SCLP while copying hsa. "
254 "Event status = %x\n", 224 "Event status = %x\n",
255 sdias_evbuf.event_status); 225 sccb.evbuf.event_status);
256 rc = -EIO; 226 rc = -EIO;
257 } 227 }
258out: 228out:
@@ -260,50 +230,19 @@ out:
260 return rc; 230 return rc;
261} 231}
262 232
263static int __init sclp_sdias_register_check(void) 233int __init sclp_sdias_init(void)
264{ 234{
265 int rc; 235 int rc;
266 236
267 rc = sclp_register(&sclp_sdias_register);
268 if (rc)
269 return rc;
270 if (sclp_sdias_blk_count() == 0) {
271 sclp_unregister(&sclp_sdias_register);
272 return -ENODEV;
273 }
274 return 0;
275}
276
277static int __init sclp_sdias_init_sync(void)
278{
279 TRACE("Try synchronous mode\n");
280 sclp_sdias_register.receive_mask = 0;
281 sclp_sdias_register.receiver_fn = NULL;
282 return sclp_sdias_register_check();
283}
284
285static int __init sclp_sdias_init_async(void)
286{
287 TRACE("Try asynchronous mode\n");
288 sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
289 sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
290 return sclp_sdias_register_check();
291}
292
293int __init sclp_sdias_init(void)
294{
295 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 237 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
296 return 0; 238 return 0;
297 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long)); 239 sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
298 debug_register_view(sdias_dbf, &debug_sprintf_view); 240 debug_register_view(sdias_dbf, &debug_sprintf_view);
299 debug_set_level(sdias_dbf, 6); 241 debug_set_level(sdias_dbf, 6);
300 if (sclp_sdias_init_sync() == 0) 242 rc = sclp_register(&sclp_sdias_register);
301 goto out; 243 if (rc)
302 if (sclp_sdias_init_async() == 0) 244 return rc;
303 goto out; 245 init_waitqueue_head(&sdias_wq);
304 TRACE("init failed\n");
305 return -ENODEV;
306out:
307 TRACE("init done\n"); 246 TRACE("init done\n");
308 return 0; 247 return 0;
309} 248}
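The sdias changes trade a completion for a flag plus wait queue (and back); both let sdias_sclp_send() sleep until sdias_callback() fires. A side-by-side sketch of the two idioms, with illustrative names:

	#include <linux/completion.h>
	#include <linux/wait.h>

	/* idiom 1: completion */
	static DECLARE_COMPLETION(done);

	static void cb_completion(void)
	{
		complete(&done);	/* releases one wait_for_completion(&done) */
	}

	/* idiom 2: flag + wait queue */
	static int req_done;
	static DECLARE_WAIT_QUEUE_HEAD(wq);

	static void cb_waitqueue(void)
	{
		req_done = 1;
		wake_up(&wq);		/* re-evaluates wait_event(wq, req_done == 1) */
	}

The completion bundles the flag, the queue and the locking into one object, which is why the left-hand column needs no hand-rolled req_done state.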
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 877fbc37c1e..a879c139926 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/sclp_tty.c
2 * SCLP line mode terminal driver. 3 * SCLP line mode terminal driver.
3 * 4 *
4 * S390 version 5 * S390 version
5 * Copyright IBM Corp. 1999 6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com> 7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 9 */
@@ -47,7 +48,7 @@ static struct sclp_buffer *sclp_ttybuf;
47/* Timer for delayed output of console messages. */ 48/* Timer for delayed output of console messages. */
48static struct timer_list sclp_tty_timer; 49static struct timer_list sclp_tty_timer;
49 50
50static struct tty_port sclp_port; 51static struct tty_struct *sclp_tty;
51static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE]; 52static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
52static unsigned short int sclp_tty_chars_count; 53static unsigned short int sclp_tty_chars_count;
53 54
@@ -63,7 +64,7 @@ static int sclp_tty_columns = 80;
63static int 64static int
64sclp_tty_open(struct tty_struct *tty, struct file *filp) 65sclp_tty_open(struct tty_struct *tty, struct file *filp)
65{ 66{
66 tty_port_tty_set(&sclp_port, tty); 67 sclp_tty = tty;
67 tty->driver_data = NULL; 68 tty->driver_data = NULL;
68 tty->low_latency = 0; 69 tty->low_latency = 0;
69 return 0; 70 return 0;
@@ -75,7 +76,7 @@ sclp_tty_close(struct tty_struct *tty, struct file *filp)
75{ 76{
76 if (tty->count > 1) 77 if (tty->count > 1)
77 return; 78 return;
78 tty_port_tty_set(&sclp_port, NULL); 79 sclp_tty = NULL;
79} 80}
80 81
81/* 82/*
@@ -107,7 +108,6 @@ sclp_tty_write_room (struct tty_struct *tty)
107static void 108static void
108sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc) 109sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
109{ 110{
110 struct tty_struct *tty;
111 unsigned long flags; 111 unsigned long flags;
112 void *page; 112 void *page;
113 113
@@ -126,10 +126,8 @@ sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
126 spin_unlock_irqrestore(&sclp_tty_lock, flags); 126 spin_unlock_irqrestore(&sclp_tty_lock, flags);
127 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback)); 127 } while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
128 /* check if the tty needs a wake up call */ 128 /* check if the tty needs a wake up call */
129 tty = tty_port_tty_get(&sclp_port); 129 if (sclp_tty != NULL) {
130 if (tty != NULL) { 130 tty_wakeup(sclp_tty);
131 tty_wakeup(tty);
132 tty_kref_put(tty);
133 } 131 }
134} 132}
135 133
@@ -328,22 +326,21 @@ sclp_tty_flush_buffer(struct tty_struct *tty)
328static void 326static void
329sclp_tty_input(unsigned char* buf, unsigned int count) 327sclp_tty_input(unsigned char* buf, unsigned int count)
330{ 328{
331 struct tty_struct *tty = tty_port_tty_get(&sclp_port);
332 unsigned int cchar; 329 unsigned int cchar;
333 330
334 /* 331 /*
335 * If this tty driver is currently closed 332 * If this tty driver is currently closed
336 * then throw the received input away. 333 * then throw the received input away.
337 */ 334 */
338 if (tty == NULL) 335 if (sclp_tty == NULL)
339 return; 336 return;
340 cchar = ctrlchar_handle(buf, count, tty); 337 cchar = ctrlchar_handle(buf, count, sclp_tty);
341 switch (cchar & CTRLCHAR_MASK) { 338 switch (cchar & CTRLCHAR_MASK) {
342 case CTRLCHAR_SYSRQ: 339 case CTRLCHAR_SYSRQ:
343 break; 340 break;
344 case CTRLCHAR_CTRL: 341 case CTRLCHAR_CTRL:
345 tty_insert_flip_char(tty, cchar, TTY_NORMAL); 342 tty_insert_flip_char(sclp_tty, cchar, TTY_NORMAL);
346 tty_flip_buffer_push(tty); 343 tty_flip_buffer_push(sclp_tty);
347 break; 344 break;
348 case CTRLCHAR_NONE: 345 case CTRLCHAR_NONE:
349 /* send (normal) input to line discipline */ 346 /* send (normal) input to line discipline */
@@ -351,14 +348,13 @@ sclp_tty_input(unsigned char* buf, unsigned int count)
351 (strncmp((const char *) buf + count - 2, "^n", 2) && 348 (strncmp((const char *) buf + count - 2, "^n", 2) &&
352 strncmp((const char *) buf + count - 2, "\252n", 2))) { 349 strncmp((const char *) buf + count - 2, "\252n", 2))) {
353 /* add the auto \n */ 350 /* add the auto \n */
354 tty_insert_flip_string(tty, buf, count); 351 tty_insert_flip_string(sclp_tty, buf, count);
355 tty_insert_flip_char(tty, '\n', TTY_NORMAL); 352 tty_insert_flip_char(sclp_tty, '\n', TTY_NORMAL);
356 } else 353 } else
357 tty_insert_flip_string(tty, buf, count - 2); 354 tty_insert_flip_string(sclp_tty, buf, count - 2);
358 tty_flip_buffer_push(tty); 355 tty_flip_buffer_push(sclp_tty);
359 break; 356 break;
360 } 357 }
361 tty_kref_put(tty);
362} 358}
363 359
364/* 360/*
@@ -547,6 +543,7 @@ sclp_tty_init(void)
547 sclp_tty_tolower = 1; 543 sclp_tty_tolower = 1;
548 } 544 }
549 sclp_tty_chars_count = 0; 545 sclp_tty_chars_count = 0;
546 sclp_tty = NULL;
550 547
551 rc = sclp_register(&sclp_input_event); 548 rc = sclp_register(&sclp_input_event);
552 if (rc) { 549 if (rc) {
@@ -554,8 +551,7 @@ sclp_tty_init(void)
554 return rc; 551 return rc;
555 } 552 }
556 553
557 tty_port_init(&sclp_port); 554 driver->owner = THIS_MODULE;
558
559 driver->driver_name = "sclp_line"; 555 driver->driver_name = "sclp_line";
560 driver->name = "sclp_line"; 556 driver->name = "sclp_line";
561 driver->major = TTY_MAJOR; 557 driver->major = TTY_MAJOR;
@@ -568,11 +564,9 @@ sclp_tty_init(void)
568 driver->init_termios.c_lflag = ISIG | ECHO; 564 driver->init_termios.c_lflag = ISIG | ECHO;
569 driver->flags = TTY_DRIVER_REAL_RAW; 565 driver->flags = TTY_DRIVER_REAL_RAW;
570 tty_set_operations(driver, &sclp_ops); 566 tty_set_operations(driver, &sclp_ops);
571 tty_port_link_device(&sclp_port, driver, 0);
572 rc = tty_register_driver(driver); 567 rc = tty_register_driver(driver);
573 if (rc) { 568 if (rc) {
574 put_tty_driver(driver); 569 put_tty_driver(driver);
575 tty_port_destroy(&sclp_port);
576 return rc; 570 return rc;
577 } 571 }
578 sclp_tty_driver = driver; 572 sclp_tty_driver = driver;
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index c8773421c31..4b965b22fec 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/sclp_tty.h
2 * interface to the SCLP-read/write driver 3 * interface to the SCLP-read/write driver
3 * 4 *
4 * S390 version 5 * S390 version
5 * Copyright IBM Corp. 1999 6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Peschke <mpeschke@de.ibm.com> 7 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 9 */
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index effcc8756e0..5d706e6c946 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -34,6 +34,7 @@
34#define SCLP_VT220_DEVICE_NAME "ttysclp" 34#define SCLP_VT220_DEVICE_NAME "ttysclp"
35#define SCLP_VT220_CONSOLE_NAME "ttyS" 35#define SCLP_VT220_CONSOLE_NAME "ttyS"
36#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */ 36#define SCLP_VT220_CONSOLE_INDEX 1 /* console=ttyS1 */
37#define SCLP_VT220_BUF_SIZE 80
37 38
38/* Representation of a single write request */ 39/* Representation of a single write request */
39struct sclp_vt220_request { 40struct sclp_vt220_request {
@@ -55,7 +56,8 @@ struct sclp_vt220_sccb {
55/* Structures and data needed to register tty driver */ 56/* Structures and data needed to register tty driver */
56static struct tty_driver *sclp_vt220_driver; 57static struct tty_driver *sclp_vt220_driver;
57 58
58static struct tty_port sclp_vt220_port; 59/* The tty_struct that the kernel associated with us */
60static struct tty_struct *sclp_vt220_tty;
59 61
60/* Lock to protect internal data from concurrent access */ 62/* Lock to protect internal data from concurrent access */
61static spinlock_t sclp_vt220_lock; 63static spinlock_t sclp_vt220_lock;
@@ -114,7 +116,6 @@ static struct sclp_register sclp_vt220_register = {
114static void 116static void
115sclp_vt220_process_queue(struct sclp_vt220_request *request) 117sclp_vt220_process_queue(struct sclp_vt220_request *request)
116{ 118{
117 struct tty_struct *tty;
118 unsigned long flags; 119 unsigned long flags;
119 void *page; 120 void *page;
120 121
@@ -140,10 +141,8 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
140 if (request == NULL && sclp_vt220_flush_later) 141 if (request == NULL && sclp_vt220_flush_later)
141 sclp_vt220_emit_current(); 142 sclp_vt220_emit_current();
142 /* Check if the tty needs a wake up call */ 143 /* Check if the tty needs a wake up call */
143 tty = tty_port_tty_get(&sclp_vt220_port); 144 if (sclp_vt220_tty != NULL) {
144 if (tty) { 145 tty_wakeup(sclp_vt220_tty);
145 tty_wakeup(tty);
146 tty_kref_put(tty);
147 } 146 }
148} 147}
149 148
@@ -461,12 +460,11 @@ sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
461static void 460static void
462sclp_vt220_receiver_fn(struct evbuf_header *evbuf) 461sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
463{ 462{
464 struct tty_struct *tty = tty_port_tty_get(&sclp_vt220_port);
465 char *buffer; 463 char *buffer;
466 unsigned int count; 464 unsigned int count;
467 465
468 /* Ignore input if device is not open */ 466 /* Ignore input if device is not open */
469 if (tty == NULL) 467 if (sclp_vt220_tty == NULL)
470 return; 468 return;
471 469
472 buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header)); 470 buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
@@ -480,11 +478,10 @@ sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
480 /* Send input to line discipline */ 478 /* Send input to line discipline */
481 buffer++; 479 buffer++;
482 count--; 480 count--;
483 tty_insert_flip_string(tty, buffer, count); 481 tty_insert_flip_string(sclp_vt220_tty, buffer, count);
484 tty_flip_buffer_push(tty); 482 tty_flip_buffer_push(sclp_vt220_tty);
485 break; 483 break;
486 } 484 }
487 tty_kref_put(tty);
488} 485}
489 486
490/* 487/*
@@ -494,7 +491,10 @@ static int
494sclp_vt220_open(struct tty_struct *tty, struct file *filp) 491sclp_vt220_open(struct tty_struct *tty, struct file *filp)
495{ 492{
496 if (tty->count == 1) { 493 if (tty->count == 1) {
497 tty_port_tty_set(&sclp_vt220_port, tty); 494 sclp_vt220_tty = tty;
495 tty->driver_data = kmalloc(SCLP_VT220_BUF_SIZE, GFP_KERNEL);
496 if (tty->driver_data == NULL)
497 return -ENOMEM;
498 tty->low_latency = 0; 498 tty->low_latency = 0;
499 if (!tty->winsize.ws_row && !tty->winsize.ws_col) { 499 if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
500 tty->winsize.ws_row = 24; 500 tty->winsize.ws_row = 24;
@@ -510,8 +510,11 @@ sclp_vt220_open(struct tty_struct *tty, struct file *filp)
510static void 510static void
511sclp_vt220_close(struct tty_struct *tty, struct file *filp) 511sclp_vt220_close(struct tty_struct *tty, struct file *filp)
512{ 512{
513 if (tty->count == 1) 513 if (tty->count == 1) {
514 tty_port_tty_set(&sclp_vt220_port, NULL); 514 sclp_vt220_tty = NULL;
515 kfree(tty->driver_data);
516 tty->driver_data = NULL;
517 }
515} 518}
516 519
517/* 520/*
@@ -615,7 +618,6 @@ static void __init __sclp_vt220_cleanup(void)
615 return; 618 return;
616 sclp_unregister(&sclp_vt220_register); 619 sclp_unregister(&sclp_vt220_register);
617 __sclp_vt220_free_pages(); 620 __sclp_vt220_free_pages();
618 tty_port_destroy(&sclp_vt220_port);
619} 621}
620 622
621/* Allocate buffer pages and register with sclp core. Controlled by init 623/* Allocate buffer pages and register with sclp core. Controlled by init
@@ -633,9 +635,9 @@ static int __init __sclp_vt220_init(int num_pages)
633 INIT_LIST_HEAD(&sclp_vt220_empty); 635 INIT_LIST_HEAD(&sclp_vt220_empty);
634 INIT_LIST_HEAD(&sclp_vt220_outqueue); 636 INIT_LIST_HEAD(&sclp_vt220_outqueue);
635 init_timer(&sclp_vt220_timer); 637 init_timer(&sclp_vt220_timer);
636 tty_port_init(&sclp_vt220_port);
637 sclp_vt220_current_request = NULL; 638 sclp_vt220_current_request = NULL;
638 sclp_vt220_buffered_chars = 0; 639 sclp_vt220_buffered_chars = 0;
640 sclp_vt220_tty = NULL;
639 sclp_vt220_flush_later = 0; 641 sclp_vt220_flush_later = 0;
640 642
641 /* Allocate pages for output buffering */ 643 /* Allocate pages for output buffering */
@@ -651,7 +653,6 @@ out:
651 if (rc) { 653 if (rc) {
652 __sclp_vt220_free_pages(); 654 __sclp_vt220_free_pages();
653 sclp_vt220_init_count--; 655 sclp_vt220_init_count--;
654 tty_port_destroy(&sclp_vt220_port);
655 } 656 }
656 return rc; 657 return rc;
657} 658}
@@ -684,6 +685,7 @@ static int __init sclp_vt220_tty_init(void)
684 if (rc) 685 if (rc)
685 goto out_driver; 686 goto out_driver;
686 687
688 driver->owner = THIS_MODULE;
687 driver->driver_name = SCLP_VT220_DRIVER_NAME; 689 driver->driver_name = SCLP_VT220_DRIVER_NAME;
688 driver->name = SCLP_VT220_DEVICE_NAME; 690 driver->name = SCLP_VT220_DEVICE_NAME;
689 driver->major = SCLP_VT220_MAJOR; 691 driver->major = SCLP_VT220_MAJOR;
@@ -693,7 +695,6 @@ static int __init sclp_vt220_tty_init(void)
693 driver->init_termios = tty_std_termios; 695 driver->init_termios = tty_std_termios;
694 driver->flags = TTY_DRIVER_REAL_RAW; 696 driver->flags = TTY_DRIVER_REAL_RAW;
695 tty_set_operations(driver, &sclp_vt220_ops); 697 tty_set_operations(driver, &sclp_vt220_ops);
696 tty_port_link_device(&sclp_vt220_port, driver, 0);
697 698
698 rc = tty_register_driver(driver); 699 rc = tty_register_driver(driver);
699 if (rc) 700 if (rc)
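The vt220 open/close hunks tie a scratch buffer to the first open and last close via tty->count. A minimal sketch of that lifecycle (buffer size and function names are illustrative):

	#include <linux/slab.h>
	#include <linux/tty.h>

	static int my_open(struct tty_struct *tty, struct file *filp)
	{
		if (tty->count == 1) {			/* first opener allocates */
			tty->driver_data = kmalloc(80, GFP_KERNEL);
			if (!tty->driver_data)
				return -ENOMEM;
		}
		return 0;
	}

	static void my_close(struct tty_struct *tty, struct file *filp)
	{
		if (tty->count == 1) {			/* last closer frees */
			kfree(tty->driver_data);
			tty->driver_data = NULL;
		}
	}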
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index ea664dd4f56..267b54e8ff5 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/tape.h
2 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
3 * 4 *
4 * S390 and zSeries version 5 * S390 and zSeries version
@@ -15,6 +16,7 @@
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include <asm/debug.h> 17#include <asm/debug.h>
17#include <asm/idals.h> 18#include <asm/idals.h>
19#include <linux/blkdev.h>
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/mtio.h> 22#include <linux/mtio.h>
@@ -152,6 +154,12 @@ struct tape_discipline {
152 struct tape_request *(*read_block)(struct tape_device *, size_t); 154 struct tape_request *(*read_block)(struct tape_device *, size_t);
153 struct tape_request *(*write_block)(struct tape_device *, size_t); 155 struct tape_request *(*write_block)(struct tape_device *, size_t);
154 void (*process_eov)(struct tape_device*); 156 void (*process_eov)(struct tape_device*);
157#ifdef CONFIG_S390_TAPE_BLOCK
158 /* Block device stuff. */
159 struct tape_request *(*bread)(struct tape_device *, struct request *);
160 void (*check_locate)(struct tape_device *, struct tape_request *);
161 void (*free_bread)(struct tape_request *);
162#endif
155 /* ioctl function for additional ioctls. */ 163 /* ioctl function for additional ioctls. */
156 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long); 164 int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
157 /* Array of tape commands with TAPE_NR_MTOPS entries */ 165 /* Array of tape commands with TAPE_NR_MTOPS entries */
@@ -174,6 +182,26 @@ struct tape_char_data {
174 int block_size; /* of size block_size. */ 182 int block_size; /* of size block_size. */
175}; 183};
176 184
185#ifdef CONFIG_S390_TAPE_BLOCK
186/* Block Frontend Data */
187struct tape_blk_data
188{
189 struct tape_device * device;
190 /* Block device request queue. */
191 struct request_queue * request_queue;
192 spinlock_t request_queue_lock;
193
194 /* Task to move entries from block request to CCS request queue. */
195 struct work_struct requeue_task;
196 atomic_t requeue_scheduled;
197
198 /* Current position on the tape. */
199 long block_position;
200 int medium_changed;
201 struct gendisk * disk;
202};
203#endif
204
177/* Tape Info */ 205/* Tape Info */
178struct tape_device { 206struct tape_device {
179 /* entry in tape_device_list */ 207 /* entry in tape_device_list */
@@ -220,6 +248,10 @@ struct tape_device {
220 248
221 /* Character device frontend data */ 249 /* Character device frontend data */
222 struct tape_char_data char_data; 250 struct tape_char_data char_data;
251#ifdef CONFIG_S390_TAPE_BLOCK
252 /* Block dev frontend data */
253 struct tape_blk_data blk_data;
254#endif
223 255
224 /* Function to start or stop the next request later. */ 256 /* Function to start or stop the next request later. */
225 struct delayed_work tape_dnr; 257 struct delayed_work tape_dnr;
@@ -281,6 +313,19 @@ extern void tapechar_exit(void);
281extern int tapechar_setup_device(struct tape_device *); 313extern int tapechar_setup_device(struct tape_device *);
282extern void tapechar_cleanup_device(struct tape_device *); 314extern void tapechar_cleanup_device(struct tape_device *);
283 315
316/* Externals from tape_block.c */
317#ifdef CONFIG_S390_TAPE_BLOCK
318extern int tapeblock_init (void);
319extern void tapeblock_exit(void);
320extern int tapeblock_setup_device(struct tape_device *);
321extern void tapeblock_cleanup_device(struct tape_device *);
322#else
323static inline int tapeblock_init (void) {return 0;}
324static inline void tapeblock_exit (void) {;}
325static inline int tapeblock_setup_device(struct tape_device *t) {return 0;}
326static inline void tapeblock_cleanup_device (struct tape_device *t) {;}
327#endif
328
284/* tape initialisation functions */ 329/* tape initialisation functions */
285#ifdef CONFIG_PROC_FS 330#ifdef CONFIG_PROC_FS
286extern void tape_proc_init (void); 331extern void tape_proc_init (void);
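The tapeblock externs added above follow the usual Kconfig stub pattern: real declarations when CONFIG_S390_TAPE_BLOCK is set, empty static inlines otherwise, so tape_core.c can call them unconditionally. The same shape with a hypothetical option and feature name:

	#ifdef CONFIG_MY_FEATURE			/* hypothetical option */
	extern int  my_feature_setup(struct tape_device *);
	extern void my_feature_cleanup(struct tape_device *);
	#else
	static inline int my_feature_setup(struct tape_device *t) { return 0; }
	static inline void my_feature_cleanup(struct tape_device *t) { }
	#endif

	/* callers need no #ifdef of their own:
	 *	rc = my_feature_setup(device);
	 */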
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 9aa79702b37..9eff2df70dd 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/tape_34xx.c
2 * tape device discipline for 3480/3490 tapes. 3 * tape device discipline for 3480/3490 tapes.
3 * 4 *
4 * Copyright IBM Corp. 2001, 2009 5 * Copyright IBM Corp. 2001, 2009
@@ -322,6 +323,20 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
322 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0; 323 inhibit_cu_recovery = (*device->modeset_byte & 0x80) ? 1 : 0;
323 sense = irb->ecw; 324 sense = irb->ecw;
324 325
326#ifdef CONFIG_S390_TAPE_BLOCK
327 if (request->op == TO_BLOCK) {
328 /*
329 * Recovery for block device requests. Set the block_position
330 * to something invalid and retry.
331 */
332 device->blk_data.block_position = -1;
333 if (request->retries-- <= 0)
334 return tape_34xx_erp_failed(request, -EIO);
335 else
336 return tape_34xx_erp_retry(request);
337 }
338#endif
339
325 if ( 340 if (
326 sense[0] & SENSE_COMMAND_REJECT && 341 sense[0] & SENSE_COMMAND_REJECT &&
327 sense[1] & SENSE_WRITE_PROTECT 342 sense[1] & SENSE_WRITE_PROTECT
@@ -1114,6 +1129,123 @@ tape_34xx_mtseek(struct tape_device *device, int mt_count)
1114 return tape_do_io_free(device, request); 1129 return tape_do_io_free(device, request);
1115} 1130}
1116 1131
1132#ifdef CONFIG_S390_TAPE_BLOCK
1133/*
1134 * Tape block read for 34xx.
1135 */
1136static struct tape_request *
1137tape_34xx_bread(struct tape_device *device, struct request *req)
1138{
1139 struct tape_request *request;
1140 struct ccw1 *ccw;
1141 int count = 0;
1142 unsigned off;
1143 char *dst;
1144 struct bio_vec *bv;
1145 struct req_iterator iter;
1146 struct tape_34xx_block_id * start_block;
1147
1148 DBF_EVENT(6, "xBREDid:");
1149
1150 /* Count the number of blocks for the request. */
1151 rq_for_each_segment(bv, req, iter)
1152 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
1153
1154 /* Allocate the ccw request. */
1155 request = tape_alloc_request(3+count+1, 8);
1156 if (IS_ERR(request))
1157 return request;
1158
1159 /* Setup ccws. */
1160 request->op = TO_BLOCK;
1161 start_block = (struct tape_34xx_block_id *) request->cpdata;
1162 start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
1163 DBF_EVENT(6, "start_block = %i\n", start_block->block);
1164
1165 ccw = request->cpaddr;
1166 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
1167
1168 /*
1169 * We always setup a nop after the mode set ccw. This slot is
1170 * used in tape_std_check_locate to insert a locate ccw if the
1171 * current tape position doesn't match the start block to be read.
1172 * The second nop will be filled with a read block id which is in
1173 * turn used by tape_34xx_free_bread to populate the segment bid
1174 * table.
1175 */
1176 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1177 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
1178
1179 rq_for_each_segment(bv, req, iter) {
1180 dst = kmap(bv->bv_page) + bv->bv_offset;
1181 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
1182 ccw->flags = CCW_FLAG_CC;
1183 ccw->cmd_code = READ_FORWARD;
1184 ccw->count = TAPEBLOCK_HSEC_SIZE;
1185 set_normalized_cda(ccw, (void*) __pa(dst));
1186 ccw++;
1187 dst += TAPEBLOCK_HSEC_SIZE;
1188 }
1189 }
1190
1191 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
1192 DBF_EVENT(6, "xBREDccwg\n");
1193 return request;
1194}
1195
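The comment above pins down a fixed slot plan for the channel program, which is also where the 3 + count + 1 passed to tape_alloc_request() comes from. Spelled out as a reading aid (a summary of the function above, not kernel code):

/*
 *  slot 0            : MODE SET (DB)
 *  slot 1            : NOP, rewritten to LOCATE by tape_34xx_check_locate
 *                      when the cached tape position is stale
 *  slot 2            : NOP, rewritten to READ BLOCK ID in the same case;
 *                      its result later feeds tape_34xx_add_sbid()
 *  slots 3..2+count  : one READ FORWARD per TAPEBLOCK_HSEC_SIZE block
 *  slot 3+count      : terminating NOP (no CCW_FLAG_CC set)
 */
enum { SLOT_MODESET, SLOT_LOCATE, SLOT_RBID, SLOT_FIRST_DATA };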
1196static void
1197tape_34xx_free_bread (struct tape_request *request)
1198{
1199 struct ccw1* ccw;
1200
1201 ccw = request->cpaddr;
1202 if ((ccw + 2)->cmd_code == READ_BLOCK_ID) {
1203 struct {
1204 struct tape_34xx_block_id cbid;
1205 struct tape_34xx_block_id dbid;
1206 } __attribute__ ((packed)) *rbi_data;
1207
1208 rbi_data = request->cpdata;
1209
1210 if (request->device)
1211 tape_34xx_add_sbid(request->device, rbi_data->cbid);
1212 }
1213
1214 /* Last ccw is a nop and doesn't need clear_normalized_cda */
1215 for (; ccw->flags & CCW_FLAG_CC; ccw++)
1216 if (ccw->cmd_code == READ_FORWARD)
1217 clear_normalized_cda(ccw);
1218 tape_free_request(request);
1219}
1220
1221/*
1222 * check_locate is called just before the tape request is passed to
1223 * the common io layer for execution. It has to check the current
1224 * tape position and insert a locate ccw if it doesn't match the
1225 * start block for the request.
1226 */
1227static void
1228tape_34xx_check_locate(struct tape_device *device, struct tape_request *request)
1229{
1230 struct tape_34xx_block_id * start_block;
1231
1232 start_block = (struct tape_34xx_block_id *) request->cpdata;
1233 if (start_block->block == device->blk_data.block_position)
1234 return;
1235
1236 DBF_LH(4, "Block seek(%06d+%06d)\n", start_block->block, device->bof);
1237 start_block->wrap = 0;
1238 start_block->segment = 1;
1239 start_block->format = (*device->modeset_byte & 0x08) ?
1240 TAPE34XX_FMT_3480_XF :
1241 TAPE34XX_FMT_3480;
1242 start_block->block = start_block->block + device->bof;
1243 tape_34xx_merge_sbid(device, start_block);
1244 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
1245 tape_ccw_cc(request->cpaddr + 2, READ_BLOCK_ID, 8, request->cpdata);
1246}
1247#endif
1248
1117/* 1249/*
1118 * List of 3480/3490 magnetic tape commands. 1250 * List of 3480/3490 magnetic tape commands.
1119 */ 1251 */
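check_locate's cost model is worth making explicit: a LOCATE (plus READ BLOCK ID) is spliced into the reserved NOP slots only when the cached position cannot be trusted. As a stand-alone predicate (illustrative; -1 is the "unknown" value stored by the error and char-device paths, and it never matches a real block number):

/* 1 = a LOCATE must be spliced into the chain before starting I/O. */
static int needs_locate(int cached_position, int wanted_block)
{
	return cached_position != wanted_block;
}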
@@ -1163,6 +1295,11 @@ static struct tape_discipline tape_discipline_34xx = {
1163 .irq = tape_34xx_irq, 1295 .irq = tape_34xx_irq,
1164 .read_block = tape_std_read_block, 1296 .read_block = tape_std_read_block,
1165 .write_block = tape_std_write_block, 1297 .write_block = tape_std_write_block,
1298#ifdef CONFIG_S390_TAPE_BLOCK
1299 .bread = tape_34xx_bread,
1300 .free_bread = tape_34xx_free_bread,
1301 .check_locate = tape_34xx_check_locate,
1302#endif
1166 .ioctl_fn = tape_34xx_ioctl, 1303 .ioctl_fn = tape_34xx_ioctl,
1167 .mtop_array = tape_34xx_mtop 1304 .mtop_array = tape_34xx_mtop
1168}; 1305};
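The conditional discipline entries restore a classic function-pointer dispatch: tape_core stays hardware-neutral and, for TO_BLOCK requests, calls check_locate through this table just before ccw_device_start() (see the __tape_start_io hunk further down). A simplified model of the dispatch, with stand-in types:

struct request;   /* opaque block-layer request */
struct tape_dev;  /* opaque device              */
struct tape_req;  /* opaque channel program     */

struct discipline {
	struct tape_req *(*bread)(struct tape_dev *, struct request *);
	void (*free_bread)(struct tape_req *);
	void (*check_locate)(struct tape_dev *, struct tape_req *);
};

static struct tape_req *start_block_read(const struct discipline *d,
					 struct tape_dev *dev,
					 struct request *rq)
{
	/* NULL entries simply mean the discipline lacks block support. */
	return d->bread ? d->bread(dev, rq) : (struct tape_req *)0;
}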
@@ -1193,7 +1330,6 @@ static struct ccw_driver tape_34xx_driver = {
1193 .set_online = tape_34xx_online, 1330 .set_online = tape_34xx_online,
1194 .set_offline = tape_generic_offline, 1331 .set_offline = tape_generic_offline,
1195 .freeze = tape_generic_pm_suspend, 1332 .freeze = tape_generic_pm_suspend,
1196 .int_class = IRQIO_TAP,
1197}; 1333};
1198 1334
1199static int 1335static int
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 327cb19ad0b..a7d57072888 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/tape_3590.c
2 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
3 * 4 *
4 * Copyright IBM Corp. 2001, 2009 5 * Copyright IBM Corp. 2001, 2009
@@ -669,6 +670,92 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
669 return 0; 670 return 0;
670} 671}
671 672
673#ifdef CONFIG_S390_TAPE_BLOCK
674/*
675 * Tape Block READ
676 */
677static struct tape_request *
678tape_3590_bread(struct tape_device *device, struct request *req)
679{
680 struct tape_request *request;
681 struct ccw1 *ccw;
682 int count = 0, start_block;
683 unsigned off;
684 char *dst;
685 struct bio_vec *bv;
686 struct req_iterator iter;
687
688 DBF_EVENT(6, "xBREDid:");
689 start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
690 DBF_EVENT(6, "start_block = %i\n", start_block);
691
692 rq_for_each_segment(bv, req, iter)
693 count += bv->bv_len >> (TAPEBLOCK_HSEC_S2B + 9);
694
695 request = tape_alloc_request(2 + count + 1, 4);
696 if (IS_ERR(request))
697 return request;
698 request->op = TO_BLOCK;
699 *(__u32 *) request->cpdata = start_block;
700 ccw = request->cpaddr;
701 ccw = tape_ccw_cc(ccw, MODE_SET_DB, 1, device->modeset_byte);
702
703 /*
704 * We always setup a nop after the mode set ccw. This slot is
705 * used in tape_std_check_locate to insert a locate ccw if the
706 * current tape position doesn't match the start block to be read.
707 */
708 ccw = tape_ccw_cc(ccw, NOP, 0, NULL);
709
710 rq_for_each_segment(bv, req, iter) {
711 dst = page_address(bv->bv_page) + bv->bv_offset;
712 for (off = 0; off < bv->bv_len; off += TAPEBLOCK_HSEC_SIZE) {
713 ccw->flags = CCW_FLAG_CC;
714 ccw->cmd_code = READ_FORWARD;
715 ccw->count = TAPEBLOCK_HSEC_SIZE;
716 set_normalized_cda(ccw, (void *) __pa(dst));
717 ccw++;
718 dst += TAPEBLOCK_HSEC_SIZE;
719 }
720 BUG_ON(off > bv->bv_len);
721 }
722 ccw = tape_ccw_end(ccw, NOP, 0, NULL);
723 DBF_EVENT(6, "xBREDccwg\n");
724 return request;
725}
726
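The shifts in tape_3590_bread() convert between 512-byte Linux sectors and the larger tape hardware blocks: blk_rq_pos() counts sectors, so >> TAPEBLOCK_HSEC_S2B yields the starting hardware block, and bv_len >> (TAPEBLOCK_HSEC_S2B + 9) turns bytes into blocks in one step. A worked example, assuming TAPEBLOCK_HSEC_SIZE is 2048 bytes (so TAPEBLOCK_HSEC_S2B == 2, since 2048 == 512 << 2):

unsigned long pos  = 48;             /* blk_rq_pos(): 512-byte sectors */
unsigned long blk  = pos >> 2;       /* = 12, first hardware block     */
unsigned int  len  = 8192;           /* bv_len of one 8 KiB segment    */
unsigned int  nblk = len >> (2 + 9); /* = 8192 / 2048 = 4 blocks       */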
727static void
728tape_3590_free_bread(struct tape_request *request)
729{
730 struct ccw1 *ccw;
731
732 /* Last ccw is a nop and doesn't need clear_normalized_cda */
733 for (ccw = request->cpaddr; ccw->flags & CCW_FLAG_CC; ccw++)
734 if (ccw->cmd_code == READ_FORWARD)
735 clear_normalized_cda(ccw);
736 tape_free_request(request);
737}
738
739/*
740 * check_locate is called just before the tape request is passed to
741 * the common io layer for execution. It has to check the current
742 * tape position and insert a locate ccw if it doesn't match the
743 * start block for the request.
744 */
745static void
746tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
747{
748 __u32 *start_block;
749
750 start_block = (__u32 *) request->cpdata;
751 if (*start_block != device->blk_data.block_position) {
752 /* Add the start offset of the file to get the real block. */
753 *start_block += device->bof;
754 tape_ccw_cc(request->cpaddr + 1, LOCATE, 4, request->cpdata);
755 }
756}
757#endif
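As in the 34xx variant, the LOCATE destination is the file-relative start block plus device->bof, the absolute block where the current file begins on the medium. With invented numbers:

unsigned int bof       = 100; /* file starts at absolute block 100   */
unsigned int rel_block = 12;  /* block the request asks for          */
unsigned int dest      = rel_block + bof; /* = 112, passed to LOCATE */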
758
672static void tape_3590_med_state_set(struct tape_device *device, 759static void tape_3590_med_state_set(struct tape_device *device,
673 struct tape_3590_med_sense *sense) 760 struct tape_3590_med_sense *sense)
674{ 761{
@@ -1336,6 +1423,20 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1336{ 1423{
1337 struct tape_3590_sense *sense; 1424 struct tape_3590_sense *sense;
1338 1425
1426#ifdef CONFIG_S390_TAPE_BLOCK
1427 if (request->op == TO_BLOCK) {
1428 /*
1429 * Recovery for block device requests. Set the block_position
1430 * to something invalid and retry.
1431 */
1432 device->blk_data.block_position = -1;
1433 if (request->retries-- <= 0)
1434 return tape_3590_erp_failed(device, request, irb, -EIO);
1435 else
1436 return tape_3590_erp_retry(device, request, irb);
1437 }
1438#endif
1439
1339 sense = (struct tape_3590_sense *) irb->ecw; 1440 sense = (struct tape_3590_sense *) irb->ecw;
1340 1441
1341 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); 1442 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
@@ -1628,6 +1729,11 @@ static struct tape_discipline tape_discipline_3590 = {
1628 .irq = tape_3590_irq, 1729 .irq = tape_3590_irq,
1629 .read_block = tape_std_read_block, 1730 .read_block = tape_std_read_block,
1630 .write_block = tape_std_write_block, 1731 .write_block = tape_std_write_block,
1732#ifdef CONFIG_S390_TAPE_BLOCK
1733 .bread = tape_3590_bread,
1734 .free_bread = tape_3590_free_bread,
1735 .check_locate = tape_3590_check_locate,
1736#endif
1631 .ioctl_fn = tape_3590_ioctl, 1737 .ioctl_fn = tape_3590_ioctl,
1632 .mtop_array = tape_3590_mtop 1738 .mtop_array = tape_3590_mtop
1633}; 1739};
@@ -1656,7 +1762,6 @@ static struct ccw_driver tape_3590_driver = {
1656 .set_offline = tape_generic_offline, 1762 .set_offline = tape_generic_offline,
1657 .set_online = tape_3590_online, 1763 .set_online = tape_3590_online,
1658 .freeze = tape_generic_pm_suspend, 1764 .freeze = tape_generic_pm_suspend,
1659 .int_class = IRQIO_TAP,
1660}; 1765};
1661 1766
1662/* 1767/*
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index 36b759e89d2..4534055f137 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/tape_3590.h
2 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
3 * 4 *
4 * Copyright IBM Corp. 2001, 2006 5 * Copyright IBM Corp. 2001,2006
5 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
6 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 2d61db3fc62..87cd0ab242d 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/tape_char.c
2 * character device frontend for tape device driver 3 * character device frontend for tape device driver
3 * 4 *
4 * S390 and zSeries version 5 * S390 and zSeries version
5 * Copyright IBM Corp. 2001, 2006 6 * Copyright IBM Corp. 2001,2006
6 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -160,6 +161,11 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
160 if (rc) 161 if (rc)
161 return rc; 162 return rc;
162 163
164#ifdef CONFIG_S390_TAPE_BLOCK
165 /* Changes position. */
166 device->blk_data.medium_changed = 1;
167#endif
168
163 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size); 169 DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
164 /* Let the discipline build the ccw chain. */ 170 /* Let the discipline build the ccw chain. */
165 request = device->discipline->read_block(device, block_size); 171 request = device->discipline->read_block(device, block_size);
@@ -212,6 +218,11 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
212 if (rc) 218 if (rc)
213 return rc; 219 return rc;
214 220
221#ifdef CONFIG_S390_TAPE_BLOCK
222 /* Changes position. */
223 device->blk_data.medium_changed = 1;
224#endif
225
215 DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size); 226 DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
216 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks); 227 DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
217 /* Let the discipline build the ccw chain. */ 228 /* Let the discipline build the ccw chain. */
@@ -368,6 +379,9 @@ __tapechar_ioctl(struct tape_device *device,
368 case MTBSFM: 379 case MTBSFM:
369 case MTFSFM: 380 case MTFSFM:
370 case MTSEEK: 381 case MTSEEK:
382#ifdef CONFIG_S390_TAPE_BLOCK
383 device->blk_data.medium_changed = 1;
384#endif
371 if (device->required_tapemarks) 385 if (device->required_tapemarks)
372 tape_std_terminate_write(device); 386 tape_std_terminate_write(device);
373 default: 387 default:
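All three hunks in this file enforce one invariant: char-device reads, writes, and tape-motion ioctls move the medium behind the block frontend's back, so its cached position must be declared stale before the next block request. The invariant in miniature (names are illustrative):

struct blk_frontend { int medium_changed; };

/* Called from char-device I/O paths and motion ioctls. */
static void mark_block_position_stale(struct blk_frontend *blk)
{
	blk->medium_changed = 1; /* next bread must revalidate and LOCATE */
}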
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 54b3c79203f..55343df61ed 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright IBM Corp. 2004 2 * (C) Copyright IBM Corp. 2004
3 * tape_class.c
3 * 4 *
4 * Tape class device support 5 * Tape class device support
5 * 6 *
@@ -16,7 +17,7 @@
16 17
17MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>"); 18MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
18MODULE_DESCRIPTION( 19MODULE_DESCRIPTION(
19 "Copyright IBM Corp. 2004 All Rights Reserved.\n" 20 "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
20 "tape_class.c" 21 "tape_class.c"
21); 22);
22MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index a332c10d50a..9e32780c317 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright IBM Corp. 2004 All Rights Reserved. 2 * (C) Copyright IBM Corp. 2004 All Rights Reserved.
3 * tape_class.h
3 * 4 *
4 * Tape class device support 5 * Tape class device support
5 * 6 *
@@ -13,6 +14,7 @@
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/fs.h> 15#include <linux/fs.h>
15#include <linux/major.h> 16#include <linux/major.h>
17#include <linux/kobj_map.h>
16#include <linux/cdev.h> 18#include <linux/cdev.h>
17 19
18#include <linux/device.h> 20#include <linux/device.h>
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index f3b5123faf0..7978a0adeaf 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/tape_core.c
2 * basic function of the tape device driver 3 * basic function of the tape device driver
3 * 4 *
4 * S390 and zSeries version 5 * S390 and zSeries version
@@ -13,6 +14,7 @@
13#define KMSG_COMPONENT "tape" 14#define KMSG_COMPONENT "tape"
14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
15 16
17#include <linux/kernel_stat.h>
16#include <linux/module.h> 18#include <linux/module.h>
17#include <linux/init.h> // for kernel parameters 19#include <linux/init.h> // for kernel parameters
18#include <linux/kmod.h> // for requesting modules 20#include <linux/kmod.h> // for requesting modules
@@ -400,6 +402,9 @@ tape_generic_online(struct tape_device *device,
400 rc = tapechar_setup_device(device); 402 rc = tapechar_setup_device(device);
401 if (rc) 403 if (rc)
402 goto out_minor; 404 goto out_minor;
405 rc = tapeblock_setup_device(device);
406 if (rc)
407 goto out_char;
403 408
404 tape_state_set(device, TS_UNUSED); 409 tape_state_set(device, TS_UNUSED);
405 410
@@ -407,6 +412,8 @@ tape_generic_online(struct tape_device *device,
407 412
408 return 0; 413 return 0;
409 414
415out_char:
416 tapechar_cleanup_device(device);
410out_minor: 417out_minor:
411 tape_remove_minor(device); 418 tape_remove_minor(device);
412out_discipline: 419out_discipline:
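The online path gains one stage, and the unwind labels keep their strict reverse ordering: a failure at stage N jumps to the label that tears down stages N-1 and below, and tape_cleanup_device() mirrors the same order (block before char). The idiom, self-contained with illustrative stage names:

static int  setup_char(void);   /* stage 1: char frontend          */
static int  setup_block(void);  /* stage 2: the new tapeblock step */
static void cleanup_char(void);

static int bring_online(void)
{
	int rc;

	rc = setup_char();
	if (rc)
		goto out;
	rc = setup_block();
	if (rc)
		goto out_char;  /* undo exactly the stages that ran */
	return 0;

out_char:
	cleanup_char();
out:
	return rc;
}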
@@ -420,6 +427,7 @@ out:
420static void 427static void
421tape_cleanup_device(struct tape_device *device) 428tape_cleanup_device(struct tape_device *device)
422{ 429{
430 tapeblock_cleanup_device(device);
423 tapechar_cleanup_device(device); 431 tapechar_cleanup_device(device);
424 device->discipline->cleanup_device(device); 432 device->discipline->cleanup_device(device);
425 module_put(device->discipline->owner); 433 module_put(device->discipline->owner);
@@ -778,6 +786,10 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
778{ 786{
779 int rc; 787 int rc;
780 788
789#ifdef CONFIG_S390_TAPE_BLOCK
790 if (request->op == TO_BLOCK)
791 device->discipline->check_locate(device, request);
792#endif
781 rc = ccw_device_start( 793 rc = ccw_device_start(
782 device->cdev, 794 device->cdev,
783 request->cpaddr, 795 request->cpaddr,
@@ -1103,6 +1115,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1103 struct tape_request *request; 1115 struct tape_request *request;
1104 int rc; 1116 int rc;
1105 1117
1118 kstat_cpu(smp_processor_id()).irqs[IOINT_TAP]++;
1106 device = dev_get_drvdata(&cdev->dev); 1119 device = dev_get_drvdata(&cdev->dev);
1107 if (device == NULL) { 1120 if (device == NULL) {
1108 return; 1121 return;
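With the .int_class member gone from struct ccw_driver (the IRQIO_TAP removals elsewhere in this patch), each interrupt handler counts its own interrupts again: the added first statement bumps a per-CPU counter for the tape interrupt class. A miniature model of what the kstat_cpu(...)++ line amounts to (array sizes invented):

enum { TAP_SLOT = 0, NR_CLASSES = 16 };
struct per_cpu_stat { unsigned long irqs[NR_CLASSES]; };
static struct per_cpu_stat stat[64];    /* one entry per CPU */

static void count_tape_irq(int cpu)
{
	stat[cpu].irqs[TAP_SLOT]++;
}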
@@ -1242,7 +1255,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1242} 1255}
1243 1256
1244/* 1257/*
1245 * Tape device open function used by tape_char frontend. 1258 * Tape device open function used by tape_char & tape_block frontends.
1246 */ 1259 */
1247int 1260int
1248tape_open(struct tape_device *device) 1261tape_open(struct tape_device *device)
@@ -1272,7 +1285,7 @@ tape_open(struct tape_device *device)
1272} 1285}
1273 1286
1274/* 1287/*
1275 * Tape device release function used by tape_char frontend. 1288 * Tape device release function used by tape_char & tape_block frontends.
1276 */ 1289 */
1277int 1290int
1278tape_release(struct tape_device *device) 1291tape_release(struct tape_device *device)
@@ -1333,6 +1346,7 @@ tape_init (void)
1333 DBF_EVENT(3, "tape init\n"); 1346 DBF_EVENT(3, "tape init\n");
1334 tape_proc_init(); 1347 tape_proc_init();
1335 tapechar_init (); 1348 tapechar_init ();
1349 tapeblock_init ();
1336 return 0; 1350 return 0;
1337} 1351}
1338 1352
@@ -1346,6 +1360,7 @@ tape_exit(void)
1346 1360
1347 /* Get rid of the frontends */ 1361 /* Get rid of the frontends */
1348 tapechar_exit(); 1362 tapechar_exit();
1363 tapeblock_exit();
1349 tape_proc_cleanup(); 1364 tape_proc_cleanup();
1350 debug_unregister (TAPE_DBF_AREA); 1365 debug_unregister (TAPE_DBF_AREA);
1351} 1366}
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 8733b232a11..0ceb37984f7 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/tape.c
2 * tape device driver for S/390 and zSeries tapes. 3 * tape device driver for S/390 and zSeries tapes.
3 * 4 *
4 * S390 and zSeries version 5 * S390 and zSeries version
5 * Copyright IBM Corp. 2001 6 * Copyright (C) 2001 IBM Corporation
6 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index 981a99fd8d4..e7650170274 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -1,8 +1,9 @@
1/* 1/*
2 * drivers/s390/char/tape_std.c
2 * standard tape device functions for ibm tapes. 3 * standard tape device functions for ibm tapes.
3 * 4 *
4 * S390 and zSeries version 5 * S390 and zSeries version
5 * Copyright IBM Corp. 2001, 2002 6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 8c760c03683..1fc95235934 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/char/tape_std.h
2 * standard tape device functions for ibm tapes. 3 * standard tape device functions for ibm tapes.
3 * 4 *
4 * Copyright IBM Corp. 2001, 2006 5 * Copyright (C) IBM Corp. 2001,2006
5 * Author(s): Carsten Otte <cotte@de.ibm.com> 6 * Author(s): Carsten Otte <cotte@de.ibm.com>
6 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 7 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -100,7 +101,11 @@ struct tape_request *tape_std_read_block(struct tape_device *, size_t);
100void tape_std_read_backward(struct tape_device *device, 101void tape_std_read_backward(struct tape_device *device,
101 struct tape_request *request); 102 struct tape_request *request);
102struct tape_request *tape_std_write_block(struct tape_device *, size_t); 103struct tape_request *tape_std_write_block(struct tape_device *, size_t);
104struct tape_request *tape_std_bread(struct tape_device *, struct request *);
105void tape_std_free_bread(struct tape_request *);
103void tape_std_check_locate(struct tape_device *, struct tape_request *); 106void tape_std_check_locate(struct tape_device *, struct tape_request *);
107struct tape_request *tape_std_bwrite(struct request *,
108 struct tape_device *, int);
104 109
105/* Some non-mtop commands. */ 110/* Some non-mtop commands. */
106int tape_std_assign(struct tape_device *); 111int tape_std_assign(struct tape_device *);
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 43ea0593bdb..2db1482b406 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * drivers/s390/char/tty3270.c
2 * IBM/3270 Driver - tty functions. 3 * IBM/3270 Driver - tty functions.
3 * 4 *
4 * Author(s): 5 * Author(s):
5 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) 6 * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
6 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * -- Copyright IBM Corp. 2003 8 * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 */ 9 */
9 10
10#include <linux/module.h> 11#include <linux/module.h>
@@ -60,7 +61,7 @@ struct tty3270_line {
60 */ 61 */
61struct tty3270 { 62struct tty3270 {
62 struct raw3270_view view; 63 struct raw3270_view view;
63 struct tty_port port; 64 struct tty_struct *tty; /* Pointer to tty structure */
64 void **freemem_pages; /* Array of pages used for freemem. */ 65 void **freemem_pages; /* Array of pages used for freemem. */
65 struct list_head freemem; /* List of free memory for strings. */ 66 struct list_head freemem; /* List of free memory for strings. */
66 67
@@ -323,8 +324,9 @@ tty3270_blank_line(struct tty3270 *tp)
323static void 324static void
324tty3270_write_callback(struct raw3270_request *rq, void *data) 325tty3270_write_callback(struct raw3270_request *rq, void *data)
325{ 326{
326 struct tty3270 *tp = container_of(rq->view, struct tty3270, view); 327 struct tty3270 *tp;
327 328
329 tp = (struct tty3270 *) rq->view;
328 if (rq->rc != 0) { 330 if (rq->rc != 0) {
329 /* Write wasn't successful. Refresh all. */ 331 /* Write wasn't successful. Refresh all. */
330 tp->update_flags = TTY_UPDATE_ALL; 332 tp->update_flags = TTY_UPDATE_ALL;
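The revert trades container_of() for a plain cast, which is only correct because struct tty3270 embeds its struct raw3270_view as the first member, making both addresses equal. A self-contained demonstration of why the two spellings agree under that layout (types are stand-ins):

#include <stddef.h>

struct view { int rows, cols; };
struct tty3270_like {
	struct view view; /* must stay the FIRST member for the cast */
	int nr_up;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct tty3270_like *from_view(struct view *v)
{
	struct tty3270_like *by_cast = (struct tty3270_like *)v;
	struct tty3270_like *by_cof  = container_of(v, struct tty3270_like, view);
	/* equal precisely because offsetof(struct tty3270_like, view) == 0 */
	return by_cast == by_cof ? by_cast : (struct tty3270_like *)0;
}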
@@ -448,9 +450,10 @@ tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
448static void 450static void
449tty3270_rcl_backward(struct kbd_data *kbd) 451tty3270_rcl_backward(struct kbd_data *kbd)
450{ 452{
451 struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); 453 struct tty3270 *tp;
452 struct string *s; 454 struct string *s;
453 455
456 tp = kbd->tty->driver_data;
454 spin_lock_bh(&tp->view.lock); 457 spin_lock_bh(&tp->view.lock);
455 if (tp->inattr == TF_INPUT) { 458 if (tp->inattr == TF_INPUT) {
456 if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines) 459 if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
@@ -475,8 +478,9 @@ tty3270_rcl_backward(struct kbd_data *kbd)
475static void 478static void
476tty3270_exit_tty(struct kbd_data *kbd) 479tty3270_exit_tty(struct kbd_data *kbd)
477{ 480{
478 struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); 481 struct tty3270 *tp;
479 482
483 tp = kbd->tty->driver_data;
480 raw3270_deactivate_view(&tp->view); 484 raw3270_deactivate_view(&tp->view);
481} 485}
482 486
@@ -486,9 +490,10 @@ tty3270_exit_tty(struct kbd_data *kbd)
486static void 490static void
487tty3270_scroll_forward(struct kbd_data *kbd) 491tty3270_scroll_forward(struct kbd_data *kbd)
488{ 492{
489 struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); 493 struct tty3270 *tp;
490 int nr_up; 494 int nr_up;
491 495
496 tp = kbd->tty->driver_data;
492 spin_lock_bh(&tp->view.lock); 497 spin_lock_bh(&tp->view.lock);
493 nr_up = tp->nr_up - tp->view.rows + 2; 498 nr_up = tp->nr_up - tp->view.rows + 2;
494 if (nr_up < 0) 499 if (nr_up < 0)
@@ -508,9 +513,10 @@ tty3270_scroll_forward(struct kbd_data *kbd)
508static void 513static void
509tty3270_scroll_backward(struct kbd_data *kbd) 514tty3270_scroll_backward(struct kbd_data *kbd)
510{ 515{
511 struct tty3270 *tp = container_of(kbd->port, struct tty3270, port); 516 struct tty3270 *tp;
512 int nr_up; 517 int nr_up;
513 518
519 tp = kbd->tty->driver_data;
514 spin_lock_bh(&tp->view.lock); 520 spin_lock_bh(&tp->view.lock);
515 nr_up = tp->nr_up + tp->view.rows - 2; 521 nr_up = tp->nr_up + tp->view.rows - 2;
516 if (nr_up + tp->view.rows - 2 > tp->nr_lines) 522 if (nr_up + tp->view.rows - 2 > tp->nr_lines)
@@ -531,10 +537,11 @@ static void
531tty3270_read_tasklet(struct raw3270_request *rrq) 537tty3270_read_tasklet(struct raw3270_request *rrq)
532{ 538{
533 static char kreset_data = TW_KR; 539 static char kreset_data = TW_KR;
534 struct tty3270 *tp = container_of(rrq->view, struct tty3270, view); 540 struct tty3270 *tp;
535 char *input; 541 char *input;
536 int len; 542 int len;
537 543
544 tp = (struct tty3270 *) rrq->view;
538 spin_lock_bh(&tp->view.lock); 545 spin_lock_bh(&tp->view.lock);
539 /* 546 /*
540 * Two AID keys are special: For 0x7d (enter) the input line 547 * Two AID keys are special: For 0x7d (enter) the input line
@@ -570,10 +577,13 @@ tty3270_read_tasklet(struct raw3270_request *rrq)
570 raw3270_request_add_data(tp->kreset, &kreset_data, 1); 577 raw3270_request_add_data(tp->kreset, &kreset_data, 1);
571 raw3270_start(&tp->view, tp->kreset); 578 raw3270_start(&tp->view, tp->kreset);
572 579
573 while (len-- > 0) 580 /* Emit input string. */
574 kbd_keycode(tp->kbd, *input++); 581 if (tp->tty) {
575 /* Emit keycode for AID byte. */ 582 while (len-- > 0)
576 kbd_keycode(tp->kbd, 256 + tp->input->string[0]); 583 kbd_keycode(tp->kbd, *input++);
584 /* Emit keycode for AID byte. */
585 kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
586 }
577 587
578 raw3270_request_reset(rrq); 588 raw3270_request_reset(rrq);
579 xchg(&tp->read, rrq); 589 xchg(&tp->read, rrq);
@@ -586,10 +596,9 @@ tty3270_read_tasklet(struct raw3270_request *rrq)
586static void 596static void
587tty3270_read_callback(struct raw3270_request *rq, void *data) 597tty3270_read_callback(struct raw3270_request *rq, void *data)
588{ 598{
589 struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
590 raw3270_get_view(rq->view); 599 raw3270_get_view(rq->view);
591 /* Schedule tasklet to pass input to tty. */ 600 /* Schedule tasklet to pass input to tty. */
592 tasklet_schedule(&tp->readlet); 601 tasklet_schedule(&((struct tty3270 *) rq->view)->readlet);
593} 602}
594 603
595/* 604/*
@@ -626,8 +635,9 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
626static int 635static int
627tty3270_activate(struct raw3270_view *view) 636tty3270_activate(struct raw3270_view *view)
628{ 637{
629 struct tty3270 *tp = container_of(view, struct tty3270, view); 638 struct tty3270 *tp;
630 639
640 tp = (struct tty3270 *) view;
631 tp->update_flags = TTY_UPDATE_ALL; 641 tp->update_flags = TTY_UPDATE_ALL;
632 tty3270_set_timer(tp, 1); 642 tty3270_set_timer(tp, 1);
633 return 0; 643 return 0;
@@ -636,8 +646,9 @@ tty3270_activate(struct raw3270_view *view)
636static void 646static void
637tty3270_deactivate(struct raw3270_view *view) 647tty3270_deactivate(struct raw3270_view *view)
638{ 648{
639 struct tty3270 *tp = container_of(view, struct tty3270, view); 649 struct tty3270 *tp;
640 650
651 tp = (struct tty3270 *) view;
641 del_timer(&tp->timer); 652 del_timer(&tp->timer);
642} 653}
643 654
@@ -679,17 +690,6 @@ tty3270_alloc_view(void)
679 if (!tp->freemem_pages) 690 if (!tp->freemem_pages)
680 goto out_tp; 691 goto out_tp;
681 INIT_LIST_HEAD(&tp->freemem); 692 INIT_LIST_HEAD(&tp->freemem);
682 INIT_LIST_HEAD(&tp->lines);
683 INIT_LIST_HEAD(&tp->update);
684 INIT_LIST_HEAD(&tp->rcl_lines);
685 tp->rcl_max = 20;
686 tty_port_init(&tp->port);
687 setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
688 (unsigned long) tp);
689 tasklet_init(&tp->readlet,
690 (void (*)(unsigned long)) tty3270_read_tasklet,
691 (unsigned long) tp->read);
692
693 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) { 693 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
694 tp->freemem_pages[pages] = (void *) 694 tp->freemem_pages[pages] = (void *)
695 __get_free_pages(GFP_KERNEL|GFP_DMA, 0); 695 __get_free_pages(GFP_KERNEL|GFP_DMA, 0);
@@ -722,7 +722,6 @@ out_pages:
722 while (pages--) 722 while (pages--)
723 free_pages((unsigned long) tp->freemem_pages[pages], 0); 723 free_pages((unsigned long) tp->freemem_pages[pages], 0);
724 kfree(tp->freemem_pages); 724 kfree(tp->freemem_pages);
725 tty_port_destroy(&tp->port);
726out_tp: 725out_tp:
727 kfree(tp); 726 kfree(tp);
728out_err: 727out_err:
@@ -745,7 +744,6 @@ tty3270_free_view(struct tty3270 *tp)
745 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) 744 for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
746 free_pages((unsigned long) tp->freemem_pages[pages], 0); 745 free_pages((unsigned long) tp->freemem_pages[pages], 0);
747 kfree(tp->freemem_pages); 746 kfree(tp->freemem_pages);
748 tty_port_destroy(&tp->port);
749 kfree(tp); 747 kfree(tp);
750} 748}
751 749
@@ -796,15 +794,16 @@ tty3270_free_screen(struct tty3270 *tp)
796static void 794static void
797tty3270_release(struct raw3270_view *view) 795tty3270_release(struct raw3270_view *view)
798{ 796{
799 struct tty3270 *tp = container_of(view, struct tty3270, view); 797 struct tty3270 *tp;
800 struct tty_struct *tty = tty_port_tty_get(&tp->port); 798 struct tty_struct *tty;
801 799
800 tp = (struct tty3270 *) view;
801 tty = tp->tty;
802 if (tty) { 802 if (tty) {
803 tty->driver_data = NULL; 803 tty->driver_data = NULL;
804 tty_port_tty_set(&tp->port, NULL); 804 tp->tty = tp->kbd->tty = NULL;
805 tty_hangup(tty); 805 tty_hangup(tty);
806 raw3270_put_view(&tp->view); 806 raw3270_put_view(&tp->view);
807 tty_kref_put(tty);
808 } 807 }
809} 808}
810 809
@@ -814,9 +813,8 @@ tty3270_release(struct raw3270_view *view)
814static void 813static void
815tty3270_free(struct raw3270_view *view) 814tty3270_free(struct raw3270_view *view)
816{ 815{
817 struct tty3270 *tp = container_of(view, struct tty3270, view); 816 tty3270_free_screen((struct tty3270 *) view);
818 tty3270_free_screen(tp); 817 tty3270_free_view((struct tty3270 *) view);
819 tty3270_free_view(tp);
820} 818}
821 819
822/* 820/*
@@ -825,13 +823,14 @@ tty3270_free(struct raw3270_view *view)
825static void 823static void
826tty3270_del_views(void) 824tty3270_del_views(void)
827{ 825{
826 struct tty3270 *tp;
828 int i; 827 int i;
829 828
830 for (i = 0; i < tty3270_max_index; i++) { 829 for (i = 0; i < tty3270_max_index; i++) {
831 struct raw3270_view *view = 830 tp = (struct tty3270 *)
832 raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR); 831 raw3270_find_view(&tty3270_fn, i + RAW3270_FIRSTMINOR);
833 if (!IS_ERR(view)) 832 if (!IS_ERR(tp))
834 raw3270_del_view(view); 833 raw3270_del_view(&tp->view);
835 } 834 }
836} 835}
837 836
@@ -844,33 +843,35 @@ static struct raw3270_fn tty3270_fn = {
844}; 843};
845 844
846/* 845/*
847 * This routine is called whenever a 3270 tty is opened first time. 846 * This routine is called whenever a 3270 tty is opened.
848 */ 847 */
849static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) 848static int
849tty3270_open(struct tty_struct *tty, struct file * filp)
850{ 850{
851 struct raw3270_view *view;
852 struct tty3270 *tp; 851 struct tty3270 *tp;
853 int i, rc; 852 int i, rc;
854 853
854 if (tty->count > 1)
855 return 0;
855 /* Check if the tty3270 is already there. */ 856 /* Check if the tty3270 is already there. */
856 view = raw3270_find_view(&tty3270_fn, 857 tp = (struct tty3270 *)
858 raw3270_find_view(&tty3270_fn,
857 tty->index + RAW3270_FIRSTMINOR); 859 tty->index + RAW3270_FIRSTMINOR);
858 if (!IS_ERR(view)) { 860 if (!IS_ERR(tp)) {
859 tp = container_of(view, struct tty3270, view);
860 tty->driver_data = tp; 861 tty->driver_data = tp;
861 tty->winsize.ws_row = tp->view.rows - 2; 862 tty->winsize.ws_row = tp->view.rows - 2;
862 tty->winsize.ws_col = tp->view.cols; 863 tty->winsize.ws_col = tp->view.cols;
863 tty->low_latency = 0; 864 tty->low_latency = 0;
864 /* why to reassign? */ 865 tp->tty = tty;
865 tty_port_tty_set(&tp->port, tty); 866 tp->kbd->tty = tty;
866 tp->inattr = TF_INPUT; 867 tp->inattr = TF_INPUT;
867 return tty_port_install(&tp->port, driver, tty); 868 return 0;
868 } 869 }
869 if (tty3270_max_index < tty->index + 1) 870 if (tty3270_max_index < tty->index + 1)
870 tty3270_max_index = tty->index + 1; 871 tty3270_max_index = tty->index + 1;
871 872
872 /* Quick exit if there is no device for tty->index. */ 873 /* Quick exit if there is no device for tty->index. */
873 if (PTR_ERR(view) == -ENODEV) 874 if (PTR_ERR(tp) == -ENODEV)
874 return -ENODEV; 875 return -ENODEV;
875 876
876 /* Allocate tty3270 structure on first open. */ 877 /* Allocate tty3270 structure on first open. */
@@ -878,6 +879,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
878 if (IS_ERR(tp)) 879 if (IS_ERR(tp))
879 return PTR_ERR(tp); 880 return PTR_ERR(tp);
880 881
882 INIT_LIST_HEAD(&tp->lines);
883 INIT_LIST_HEAD(&tp->update);
884 INIT_LIST_HEAD(&tp->rcl_lines);
885 tp->rcl_max = 20;
886 setup_timer(&tp->timer, (void (*)(unsigned long)) tty3270_update,
887 (unsigned long) tp);
888 tasklet_init(&tp->readlet,
889 (void (*)(unsigned long)) tty3270_read_tasklet,
890 (unsigned long) tp->read);
891
881 rc = raw3270_add_view(&tp->view, &tty3270_fn, 892 rc = raw3270_add_view(&tp->view, &tty3270_fn,
882 tty->index + RAW3270_FIRSTMINOR); 893 tty->index + RAW3270_FIRSTMINOR);
883 if (rc) { 894 if (rc) {
@@ -892,8 +903,9 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
892 return rc; 903 return rc;
893 } 904 }
894 905
895 tty_port_tty_set(&tp->port, tty); 906 tp->tty = tty;
896 tty->low_latency = 0; 907 tty->low_latency = 0;
908 tty->driver_data = tp;
897 tty->winsize.ws_row = tp->view.rows - 2; 909 tty->winsize.ws_row = tp->view.rows - 2;
898 tty->winsize.ws_col = tp->view.cols; 910 tty->winsize.ws_col = tp->view.cols;
899 911
@@ -905,7 +917,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
905 for (i = 0; i < tp->view.rows - 2; i++) 917 for (i = 0; i < tp->view.rows - 2; i++)
906 tty3270_blank_line(tp); 918 tty3270_blank_line(tp);
907 919
908 tp->kbd->port = &tp->port; 920 tp->kbd->tty = tty;
909 tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty; 921 tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
910 tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward; 922 tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
911 tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward; 923 tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
@@ -913,15 +925,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
913 kbd_ascebc(tp->kbd, tp->view.ascebc); 925 kbd_ascebc(tp->kbd, tp->view.ascebc);
914 926
915 raw3270_activate_view(&tp->view); 927 raw3270_activate_view(&tp->view);
916
917 rc = tty_port_install(&tp->port, driver, tty);
918 if (rc) {
919 raw3270_put_view(&tp->view);
920 return rc;
921 }
922
923 tty->driver_data = tp;
924
925 return 0; 928 return 0;
926} 929}
927 930
@@ -932,22 +935,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
932static void 935static void
933tty3270_close(struct tty_struct *tty, struct file * filp) 936tty3270_close(struct tty_struct *tty, struct file * filp)
934{ 937{
935 struct tty3270 *tp = tty->driver_data; 938 struct tty3270 *tp;
936 939
937 if (tty->count > 1) 940 if (tty->count > 1)
938 return; 941 return;
942 tp = (struct tty3270 *) tty->driver_data;
939 if (tp) { 943 if (tp) {
940 tty->driver_data = NULL; 944 tty->driver_data = NULL;
941 tty_port_tty_set(&tp->port, NULL); 945 tp->tty = tp->kbd->tty = NULL;
942 }
943}
944
945static void tty3270_cleanup(struct tty_struct *tty)
946{
947 struct tty3270 *tp = tty->driver_data;
948
949 if (tp)
950 raw3270_put_view(&tp->view); 946 raw3270_put_view(&tp->view);
947 }
951} 948}
952 949
953/* 950/*
@@ -1394,7 +1391,7 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch)
1394 tty3270_lf(tp); 1391 tty3270_lf(tp);
1395 break; 1392 break;
1396 case 'Z': /* Respond ID. */ 1393 case 'Z': /* Respond ID. */
1397 kbd_puts_queue(&tp->port, "\033[?6c"); 1394 kbd_puts_queue(tp->tty, "\033[?6c");
1398 break; 1395 break;
1399 case '7': /* Save cursor position. */ 1396 case '7': /* Save cursor position. */
1400 tp->saved_cx = tp->cx; 1397 tp->saved_cx = tp->cx;
@@ -1440,11 +1437,11 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch)
1440 tp->esc_state = ESnormal; 1437 tp->esc_state = ESnormal;
1441 if (ch == 'n' && !tp->esc_ques) { 1438 if (ch == 'n' && !tp->esc_ques) {
1442 if (tp->esc_par[0] == 5) /* Status report. */ 1439 if (tp->esc_par[0] == 5) /* Status report. */
1443 kbd_puts_queue(&tp->port, "\033[0n"); 1440 kbd_puts_queue(tp->tty, "\033[0n");
1444 else if (tp->esc_par[0] == 6) { /* Cursor report. */ 1441 else if (tp->esc_par[0] == 6) { /* Cursor report. */
1445 char buf[40]; 1442 char buf[40];
1446 sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1); 1443 sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
1447 kbd_puts_queue(&tp->port, buf); 1444 kbd_puts_queue(tp->tty, buf);
1448 } 1445 }
1449 return; 1446 return;
1450 } 1447 }
@@ -1516,13 +1513,12 @@ tty3270_escape_sequence(struct tty3270 *tp, char ch)
1516 * String write routine for 3270 ttys 1513 * String write routine for 3270 ttys
1517 */ 1514 */
1518static void 1515static void
1519tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty, 1516tty3270_do_write(struct tty3270 *tp, const unsigned char *buf, int count)
1520 const unsigned char *buf, int count)
1521{ 1517{
1522 int i_msg, i; 1518 int i_msg, i;
1523 1519
1524 spin_lock_bh(&tp->view.lock); 1520 spin_lock_bh(&tp->view.lock);
1525 for (i_msg = 0; !tty->stopped && i_msg < count; i_msg++) { 1521 for (i_msg = 0; !tp->tty->stopped && i_msg < count; i_msg++) {
1526 if (tp->esc_state != 0) { 1522 if (tp->esc_state != 0) {
1527 /* Continue escape sequence. */ 1523 /* Continue escape sequence. */
1528 tty3270_escape_sequence(tp, buf[i_msg]); 1524 tty3270_escape_sequence(tp, buf[i_msg]);
@@ -1599,10 +1595,10 @@ tty3270_write(struct tty_struct * tty,
1599 if (!tp) 1595 if (!tp)
1600 return 0; 1596 return 0;
1601 if (tp->char_count > 0) { 1597 if (tp->char_count > 0) {
1602 tty3270_do_write(tp, tty, tp->char_buf, tp->char_count); 1598 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1603 tp->char_count = 0; 1599 tp->char_count = 0;
1604 } 1600 }
1605 tty3270_do_write(tp, tty, buf, count); 1601 tty3270_do_write(tp, buf, count);
1606 return count; 1602 return count;
1607} 1603}
1608 1604
@@ -1633,7 +1629,7 @@ tty3270_flush_chars(struct tty_struct *tty)
1633 if (!tp) 1629 if (!tp)
1634 return; 1630 return;
1635 if (tp->char_count > 0) { 1631 if (tp->char_count > 0) {
1636 tty3270_do_write(tp, tty, tp->char_buf, tp->char_count); 1632 tty3270_do_write(tp, tp->char_buf, tp->char_count);
1637 tp->char_count = 0; 1633 tp->char_count = 0;
1638 } 1634 }
1639} 1635}
@@ -1751,8 +1747,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty,
1751#endif 1747#endif
1752 1748
1753static const struct tty_operations tty3270_ops = { 1749static const struct tty_operations tty3270_ops = {
1754 .install = tty3270_install, 1750 .open = tty3270_open,
1755 .cleanup = tty3270_cleanup,
1756 .close = tty3270_close, 1751 .close = tty3270_close,
1757 .write = tty3270_write, 1752 .write = tty3270_write,
1758 .put_char = tty3270_put_char, 1753 .put_char = tty3270_put_char,
@@ -1789,6 +1784,7 @@ static int __init tty3270_init(void)
1789 * Entries in tty3270_driver that are NOT initialized: 1784 * Entries in tty3270_driver that are NOT initialized:
1790 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc 1785 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
1791 */ 1786 */
1787 driver->owner = THIS_MODULE;
1792 driver->driver_name = "ttyTUB"; 1788 driver->driver_name = "ttyTUB";
1793 driver->name = "ttyTUB"; 1789 driver->name = "ttyTUB";
1794 driver->major = IBM_TTY3270_MAJOR; 1790 driver->major = IBM_TTY3270_MAJOR;
@@ -1796,7 +1792,7 @@ static int __init tty3270_init(void)
1796 driver->type = TTY_DRIVER_TYPE_SYSTEM; 1792 driver->type = TTY_DRIVER_TYPE_SYSTEM;
1797 driver->subtype = SYSTEM_TYPE_TTY; 1793 driver->subtype = SYSTEM_TYPE_TTY;
1798 driver->init_termios = tty_std_termios; 1794 driver->init_termios = tty_std_termios;
1799 driver->flags = TTY_DRIVER_RESET_TERMIOS; 1795 driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_DYNAMIC_DEV;
1800 tty_set_operations(driver, &tty3270_ops); 1796 tty_set_operations(driver, &tty3270_ops);
1801 ret = tty_register_driver(driver); 1797 ret = tty_register_driver(driver);
1802 if (ret) { 1798 if (ret) {
@@ -1815,7 +1811,6 @@ tty3270_exit(void)
1815 driver = tty3270_driver; 1811 driver = tty3270_driver;
1816 tty3270_driver = NULL; 1812 tty3270_driver = NULL;
1817 tty_unregister_driver(driver); 1813 tty_unregister_driver(driver);
1818 put_tty_driver(driver);
1819 tty3270_del_views(); 1814 tty3270_del_views();
1820} 1815}
1821 1816
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
index 11141a8f897..799da57f039 100644
--- a/drivers/s390/char/tty3270.h
+++ b/drivers/s390/char/tty3270.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/char/tty3270.h
3 *
2 * Copyright IBM Corp. 2007 4 * Copyright IBM Corp. 2007
3 * 5 *
4 */ 6 */
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 0fdedadff7b..31a3ccbb649 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2004, 2010 2 * Copyright IBM Corp. 2004,2010
3 * Interface implementation for communication with the z/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * 4 *
5 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> 5 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
@@ -13,11 +13,9 @@
13 13
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/compat.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
18#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20#include <linux/export.h>
21#include <asm/compat.h> 19#include <asm/compat.h>
22#include <asm/cpcmd.h> 20#include <asm/cpcmd.h>
23#include <asm/debug.h> 21#include <asm/debug.h>
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 1e29b041838..6a993948e18 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2004, 2005 2 * Copyright (C) 2004, 2005 IBM Corporation
3 * Interface implementation for communication with the z/VM control program 3 * Interface implementation for communication with the z/VM control program
4 * Version 1.0 4 * Version 1.0
5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> 5 * Author(s): Christian Borntraeger <cborntra@de.ibm.com>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 9b3a24e8d3a..524d988d89d 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/char/vmlogrdr.c
2 * character device driver for reading z/VM system service records 3 * character device driver for reading z/VM system service records
3 * 4 *
4 * 5 *
@@ -321,7 +322,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
321 * only allow for blocking reads to be open 322 * only allow for blocking reads to be open
322 */ 323 */
323 if (filp->f_flags & O_NONBLOCK) 324 if (filp->f_flags & O_NONBLOCK)
324 return -EOPNOTSUPP; 325 return -ENOSYS;
325 326
326 /* Besure this device hasn't already been opened */ 327 /* Besure this device hasn't already been opened */
326 /* Be sure this device hasn't already been opened */ 327 /* Be sure this device hasn't already been opened */
327 spin_lock_bh(&logptr->priv_lock); 328 spin_lock_bh(&logptr->priv_lock);
@@ -655,19 +656,10 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
655 len = strlen(buf); 656 len = strlen(buf);
656 return len; 657 return len;
657} 658}
659
660
658static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show, 661static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
659 NULL); 662 NULL);
660static struct attribute *vmlogrdr_drv_attrs[] = {
661 &driver_attr_recording_status.attr,
662 NULL,
663};
664static struct attribute_group vmlogrdr_drv_attr_group = {
665 .attrs = vmlogrdr_drv_attrs,
666};
667static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
668 &vmlogrdr_drv_attr_group,
669 NULL,
670};
671 663
672static struct attribute *vmlogrdr_attrs[] = { 664static struct attribute *vmlogrdr_attrs[] = {
673 &dev_attr_autopurge.attr, 665 &dev_attr_autopurge.attr,
@@ -676,13 +668,6 @@ static struct attribute *vmlogrdr_attrs[] = {
676 &dev_attr_recording.attr, 668 &dev_attr_recording.attr,
677 NULL, 669 NULL,
678}; 670};
679static struct attribute_group vmlogrdr_attr_group = {
680 .attrs = vmlogrdr_attrs,
681};
682static const struct attribute_group *vmlogrdr_attr_groups[] = {
683 &vmlogrdr_attr_group,
684 NULL,
685};
686 671
687static int vmlogrdr_pm_prepare(struct device *dev) 672static int vmlogrdr_pm_prepare(struct device *dev)
688{ 673{
@@ -707,14 +692,18 @@ static const struct dev_pm_ops vmlogrdr_pm_ops = {
707 .prepare = vmlogrdr_pm_prepare, 692 .prepare = vmlogrdr_pm_prepare,
708}; 693};
709 694
695static struct attribute_group vmlogrdr_attr_group = {
696 .attrs = vmlogrdr_attrs,
697};
698
710static struct class *vmlogrdr_class; 699static struct class *vmlogrdr_class;
711static struct device_driver vmlogrdr_driver = { 700static struct device_driver vmlogrdr_driver = {
712 .name = "vmlogrdr", 701 .name = "vmlogrdr",
713 .bus = &iucv_bus, 702 .bus = &iucv_bus,
714 .pm = &vmlogrdr_pm_ops, 703 .pm = &vmlogrdr_pm_ops,
715 .groups = vmlogrdr_drv_attr_groups,
716}; 704};
717 705
706
718static int vmlogrdr_register_driver(void) 707static int vmlogrdr_register_driver(void)
719{ 708{
720 int ret; 709 int ret;
@@ -728,14 +717,21 @@ static int vmlogrdr_register_driver(void)
728 if (ret) 717 if (ret)
729 goto out_iucv; 718 goto out_iucv;
730 719
720 ret = driver_create_file(&vmlogrdr_driver,
721 &driver_attr_recording_status);
722 if (ret)
723 goto out_driver;
724
731 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); 725 vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
732 if (IS_ERR(vmlogrdr_class)) { 726 if (IS_ERR(vmlogrdr_class)) {
733 ret = PTR_ERR(vmlogrdr_class); 727 ret = PTR_ERR(vmlogrdr_class);
734 vmlogrdr_class = NULL; 728 vmlogrdr_class = NULL;
735 goto out_driver; 729 goto out_attr;
736 } 730 }
737 return 0; 731 return 0;
738 732
733out_attr:
734 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
739out_driver: 735out_driver:
740 driver_unregister(&vmlogrdr_driver); 736 driver_unregister(&vmlogrdr_driver);
741out_iucv: 737out_iucv:
@@ -749,6 +745,7 @@ static void vmlogrdr_unregister_driver(void)
749{ 745{
750 class_destroy(vmlogrdr_class); 746 class_destroy(vmlogrdr_class);
751 vmlogrdr_class = NULL; 747 vmlogrdr_class = NULL;
748 driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
752 driver_unregister(&vmlogrdr_driver); 749 driver_unregister(&vmlogrdr_driver);
753 iucv_unregister(&vmlogrdr_iucv_handler, 1); 750 iucv_unregister(&vmlogrdr_iucv_handler, 1);
754} 751}
@@ -765,7 +762,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
765 dev->bus = &iucv_bus; 762 dev->bus = &iucv_bus;
766 dev->parent = iucv_root; 763 dev->parent = iucv_root;
767 dev->driver = &vmlogrdr_driver; 764 dev->driver = &vmlogrdr_driver;
768 dev->groups = vmlogrdr_attr_groups;
769 dev_set_drvdata(dev, priv); 765 dev_set_drvdata(dev, priv);
770 /* 766 /*
771 * The release function could be called after the 767 * The release function could be called after the
@@ -783,6 +779,11 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
783 return ret; 779 return ret;
784 } 780 }
785 781
782 ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
783 if (ret) {
784 device_unregister(dev);
785 return ret;
786 }
786 priv->class_device = device_create(vmlogrdr_class, dev, 787 priv->class_device = device_create(vmlogrdr_class, dev,
787 MKDEV(vmlogrdr_major, 788 MKDEV(vmlogrdr_major,
788 priv->minor_num), 789 priv->minor_num),
@@ -790,6 +791,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
790 if (IS_ERR(priv->class_device)) { 791 if (IS_ERR(priv->class_device)) {
791 ret = PTR_ERR(priv->class_device); 792 ret = PTR_ERR(priv->class_device);
792 priv->class_device=NULL; 793 priv->class_device=NULL;
794 sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
793 device_unregister(dev); 795 device_unregister(dev);
794 return ret; 796 return ret;
795 } 797 }
@@ -802,6 +804,7 @@ static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
802{ 804{
803 device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); 805 device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
804 if (priv->device != NULL) { 806 if (priv->device != NULL) {
807 sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
805 device_unregister(priv->device); 808 device_unregister(priv->device);
806 priv->device=NULL; 809 priv->device=NULL;
807 } 810 }
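With the dev->groups shortcut gone, the attribute group's lifetime is managed by hand, and teardown is creation in reverse. The ordering the two functions now implement, summarized (all names are the kernel's own, taken from the hunks above):

/*
 * register:    device_register(dev)
 *           -> sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group)
 *                 on failure: device_unregister(dev)
 *           -> device_create(vmlogrdr_class, ...)
 *                 on failure: sysfs_remove_group(...), device_unregister(dev)
 *
 * unregister:  device_destroy(vmlogrdr_class, ...)
 *           -> sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group)
 *           -> device_unregister(dev)
 */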
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 483f72ba030..f6b00c3df42 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -11,9 +11,9 @@
11#define KMSG_COMPONENT "vmur" 11#define KMSG_COMPONENT "vmur"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/kernel_stat.h>
14#include <linux/cdev.h> 15#include <linux/cdev.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/module.h>
17 17
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19#include <asm/cio.h> 19#include <asm/cio.h>
@@ -74,7 +74,6 @@ static struct ccw_driver ur_driver = {
74 .set_online = ur_set_online, 74 .set_online = ur_set_online,
75 .set_offline = ur_set_offline, 75 .set_offline = ur_set_offline,
76 .freeze = ur_pm_suspend, 76 .freeze = ur_pm_suspend,
77 .int_class = IRQIO_VMR,
78}; 77};
79 78
80static DEFINE_MUTEX(vmur_mutex); 79static DEFINE_MUTEX(vmur_mutex);
@@ -306,6 +305,7 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
306{ 305{
307 struct urdev *urd; 306 struct urdev *urd;
308 307
308 kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
309 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 309 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
310 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat, 310 intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
311 irb->scsw.cmd.count); 311 irb->scsw.cmd.count);
@@ -903,7 +903,7 @@ static int ur_set_online(struct ccw_device *cdev)
903 goto fail_urdev_put; 903 goto fail_urdev_put;
904 } 904 }
905 905
906 urd->char_device->ops = &ur_fops; 906 cdev_init(urd->char_device, &ur_fops);
907 urd->char_device->dev = MKDEV(major, minor); 907 urd->char_device->dev = MKDEV(major, minor);
908 urd->char_device->owner = ur_fops.owner; 908 urd->char_device->owner = ur_fops.owner;
909 909
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index e9b72311e25..11312f401c7 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Watchdog implementation based on z/VM Watchdog Timer API 2 * Watchdog implementation based on z/VM Watchdog Timer API
3 * 3 *
4 * Copyright IBM Corp. 2004, 2009 4 * Copyright IBM Corp. 2004,2009
5 * 5 *
6 * The user space watchdog daemon can use this driver as 6 * The user space watchdog daemon can use this driver as
7 * /dev/vmwatchdog to have z/VM execute the specified CP 7 * /dev/vmwatchdog to have z/VM execute the specified CP
@@ -28,9 +28,9 @@
28#define MAX_CMDLEN 240 28#define MAX_CMDLEN 240
29#define MIN_INTERVAL 15 29#define MIN_INTERVAL 15
30static char vmwdt_cmd[MAX_CMDLEN] = "IPL"; 30static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
31static bool vmwdt_conceal; 31static int vmwdt_conceal;
32 32
33static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT; 33static int vmwdt_nowayout = WATCHDOG_NOWAYOUT;
34 34
35MODULE_LICENSE("GPL"); 35MODULE_LICENSE("GPL");
36MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); 36MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index e3b9308b0fe..3b94044027c 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * For more information please refer to Documentation/s390/zfcpdump.txt 6 * For more information please refer to Documentation/s390/zfcpdump.txt
7 * 7 *
8 * Copyright IBM Corp. 2003, 2008 8 * Copyright IBM Corp. 2003,2008
9 * Author(s): Michael Holzheu 9 * Author(s): Michael Holzheu
10 */ 10 */
11 11
@@ -16,11 +16,11 @@
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/miscdevice.h> 17#include <linux/miscdevice.h>
18#include <linux/debugfs.h> 18#include <linux/debugfs.h>
19#include <linux/module.h>
20#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
21#include <asm/ipl.h> 20#include <asm/ipl.h>
22#include <asm/sclp.h> 21#include <asm/sclp.h>
23#include <asm/setup.h> 22#include <asm/setup.h>
23#include <asm/sigp.h>
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25#include <asm/debug.h> 25#include <asm/debug.h>
26#include <asm/processor.h> 26#include <asm/processor.h>
@@ -142,6 +142,22 @@ static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
142 return memcpy_hsa(dest, src, count, TO_KERNEL); 142 return memcpy_hsa(dest, src, count, TO_KERNEL);
143} 143}
144 144
145static int memcpy_real_user(void __user *dest, unsigned long src, size_t count)
146{
147 static char buf[4096];
148 int offs = 0, size;
149
150 while (offs < count) {
151 size = min(sizeof(buf), count - offs);
152 if (memcpy_real(buf, (void *) src + offs, size))
153 return -EFAULT;
154 if (copy_to_user(dest + offs, buf, size))
155 return -EFAULT;
156 offs += size;
157 }
158 return 0;
159}
160
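memcpy_real_user() streams an arbitrarily large range through a fixed 4 KiB bounce buffer, two copies per chunk (real memory to kernel buffer, kernel buffer to user space); note the static buffer implicitly assumes callers are serialized. The same chunking loop as a stand-alone, runnable function, with the privileged accessors replaced by plain memcpy for illustration:

#include <stddef.h>
#include <string.h>

static int bounce_copy(void *dst, const void *src, size_t count)
{
	char buf[4096];
	size_t offs = 0, size;

	while (offs < count) {
		size = count - offs < sizeof(buf) ? count - offs : sizeof(buf);
		memcpy(buf, (const char *)src + offs, size); /* memcpy_real  */
		memcpy((char *)dst + offs, buf, size);       /* copy_to_user */
		offs += size;
	}
	return 0;
}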
145static int __init init_cpu_info(enum arch_id arch) 161static int __init init_cpu_info(enum arch_id arch)
146{ 162{
147 struct save_area *sa; 163 struct save_area *sa;
@@ -330,8 +346,8 @@ static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
330 346
331 /* Copy from real mem */ 347 /* Copy from real mem */
332 size = count - mem_offs - hdr_count; 348 size = count - mem_offs - hdr_count;
333 rc = copy_to_user_real(buf + hdr_count + mem_offs, 349 rc = memcpy_real_user(buf + hdr_count + mem_offs, mem_start + mem_offs,
334 (void *) mem_start + mem_offs, size); 350 size);
335 if (rc) 351 if (rc)
336 goto fail; 352 goto fail;
337 353
@@ -640,8 +656,6 @@ static int __init zcore_init(void)
640 656
641 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 657 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
642 return -ENODATA; 658 return -ENODATA;
643 if (OLDMEM_BASE)
644 return -ENODATA;
645 659
646 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); 660 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
647 debug_register_view(zcore_dbf, &debug_sprintf_view); 661 debug_register_view(zcore_dbf, &debug_sprintf_view);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index 8c4a386e97f..e1b700a1964 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -8,8 +8,6 @@ ccw_device-objs += device.o device_fsm.o device_ops.o
8ccw_device-objs += device_id.o device_pgid.o device_status.o 8ccw_device-objs += device_id.o device_pgid.o device_status.o
9obj-y += ccw_device.o cmf.o 9obj-y += ccw_device.o cmf.o
10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o 10obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
11obj-$(CONFIG_EADM_SCH) += eadm_sch.o
12obj-$(CONFIG_SCM_BUS) += scm.o
13obj-$(CONFIG_CCWGROUP) += ccwgroup.o 11obj-$(CONFIG_CCWGROUP) += ccwgroup.o
14 12
15qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o 13qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index bc10220f684..65d2e769dfa 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/cio/airq.c
2 * Support for adapter interruptions 3 * Support for adapter interruptions
3 * 4 *
4 * Copyright IBM Corp. 1999, 2007 5 * Copyright IBM Corp. 1999,2007
5 * Author(s): Ingo Adlung <adlung@de.ibm.com> 6 * Author(s): Ingo Adlung <adlung@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 * Arnd Bergmann <arndb@de.ibm.com> 8 * Arnd Bergmann <arndb@de.ibm.com>
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 2d2a966a3b3..76058a5166e 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * drivers/s390/cio/blacklist.c
2 * S/390 common I/O routines -- blacklisting of specific devices 3 * S/390 common I/O routines -- blacklisting of specific devices
3 * 4 *
4 * Copyright IBM Corp. 1999, 2002 5 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
6 * IBM Corporation
5 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Author(s): Ingo Adlung (adlung@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com) 9 * Arnd Bergmann (arndb@de.ibm.com)
@@ -333,9 +335,10 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
333 return -EINVAL; 335 return -EINVAL;
334 if (user_len > 65536) 336 if (user_len > 65536)
335 user_len = 65536; 337 user_len = 65536;
336 buf = vzalloc(user_len + 1); /* maybe better use the stack? */ 338 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
337 if (buf == NULL) 339 if (buf == NULL)
338 return -ENOMEM; 340 return -ENOMEM;
341 memset(buf, 0, user_len + 1);
339 342
340 if (strncpy_from_user (buf, user_buf, user_len) < 0) { 343 if (strncpy_from_user (buf, user_buf, user_len) < 0) {
341 rc = -EFAULT; 344 rc = -EFAULT;
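The allocation change in the hunk above is purely cosmetic: vzalloc() is shorthand for vmalloc() followed by a clearing memset(), so both sides return zeroed, virtually contiguous memory. A minimal sketch of the equivalence, assuming kernel context:

#include <linux/vmalloc.h>
#include <linux/string.h>

static void *alloc_zeroed(unsigned long len)
{
	void *buf = vmalloc(len);	/* older spelling: allocate ... */

	if (buf)
		memset(buf, 0, len);	/* ... then clear by hand */
	return buf;			/* newer spelling: return vzalloc(len); */
}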
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 84846c2b96d..cda9bd6e48e 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bus driver for ccwgroup 2 * bus driver for ccwgroup
3 * 3 *
4 * Copyright IBM Corp. 2002, 2012 4 * Copyright IBM Corp. 2002, 2009
5 * 5 *
6 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -15,13 +15,10 @@
15#include <linux/ctype.h> 15#include <linux/ctype.h>
16#include <linux/dcache.h> 16#include <linux/dcache.h>
17 17
18#include <asm/cio.h>
19#include <asm/ccwdev.h> 18#include <asm/ccwdev.h>
20#include <asm/ccwgroup.h> 19#include <asm/ccwgroup.h>
21 20
22#include "device.h" 21#define CCW_BUS_ID_SIZE 20
23
24#define CCW_BUS_ID_SIZE 10
25 22
26/* In Linux 2.4, we had a channel device layer called "chandev" 23/* In Linux 2.4, we had a channel device layer called "chandev"
27 * that did all sorts of obscure stuff for networking devices. 24 * that did all sorts of obscure stuff for networking devices.
@@ -30,9 +27,33 @@
30 * to devices that use multiple subchannels. 27 * to devices that use multiple subchannels.
31 */ 28 */
32 29
30/* a device matches a driver if all its slave devices match the same
31 * entry of the driver */
32static int
33ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
34{
35 struct ccwgroup_device *gdev;
36 struct ccwgroup_driver *gdrv;
37
38 gdev = to_ccwgroupdev(dev);
39 gdrv = to_ccwgroupdrv(drv);
40
41 if (gdev->creator_id == gdrv->driver_id)
42 return 1;
43
44 return 0;
45}
46static int
47ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
48{
49 /* TODO */
50 return 0;
51}
52
33static struct bus_type ccwgroup_bus_type; 53static struct bus_type ccwgroup_bus_type;
34 54
35static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 55static void
56__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
36{ 57{
37 int i; 58 int i;
38 char str[8]; 59 char str[8];
@@ -42,6 +63,7 @@ static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
42 sysfs_remove_link(&gdev->dev.kobj, str); 63 sysfs_remove_link(&gdev->dev.kobj, str);
43 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); 64 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
44 } 65 }
66
45} 67}
46 68
47/* 69/*
@@ -65,105 +87,12 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
65 } 87 }
66} 88}
67 89
68/**
69 * ccwgroup_set_online() - enable a ccwgroup device
70 * @gdev: target ccwgroup device
71 *
72 * This function attempts to put the ccwgroup device into the online state.
73 * Returns:
74 * %0 on success and a negative error value on failure.
75 */
76int ccwgroup_set_online(struct ccwgroup_device *gdev)
77{
78 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
79 int ret = -EINVAL;
80
81 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
82 return -EAGAIN;
83 if (gdev->state == CCWGROUP_ONLINE)
84 goto out;
85 if (gdrv->set_online)
86 ret = gdrv->set_online(gdev);
87 if (ret)
88 goto out;
89
90 gdev->state = CCWGROUP_ONLINE;
91out:
92 atomic_set(&gdev->onoff, 0);
93 return ret;
94}
95EXPORT_SYMBOL(ccwgroup_set_online);
96
97/**
98 * ccwgroup_set_offline() - disable a ccwgroup device
99 * @gdev: target ccwgroup device
100 *
101 * This function attempts to put the ccwgroup device into the offline state.
102 * Returns:
103 * %0 on success and a negative error value on failure.
104 */
105int ccwgroup_set_offline(struct ccwgroup_device *gdev)
106{
107 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
108 int ret = -EINVAL;
109
110 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
111 return -EAGAIN;
112 if (gdev->state == CCWGROUP_OFFLINE)
113 goto out;
114 if (gdrv->set_offline)
115 ret = gdrv->set_offline(gdev);
116 if (ret)
117 goto out;
118
119 gdev->state = CCWGROUP_OFFLINE;
120out:
121 atomic_set(&gdev->onoff, 0);
122 return ret;
123}
124EXPORT_SYMBOL(ccwgroup_set_offline);
125
126static ssize_t ccwgroup_online_store(struct device *dev, 90static ssize_t ccwgroup_online_store(struct device *dev,
127 struct device_attribute *attr, 91 struct device_attribute *attr,
128 const char *buf, size_t count) 92 const char *buf, size_t count);
129{
130 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
131 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
132 unsigned long value;
133 int ret;
134
135 if (!dev->driver)
136 return -EINVAL;
137 if (!try_module_get(gdrv->driver.owner))
138 return -EINVAL;
139
140 ret = strict_strtoul(buf, 0, &value);
141 if (ret)
142 goto out;
143
144 if (value == 1)
145 ret = ccwgroup_set_online(gdev);
146 else if (value == 0)
147 ret = ccwgroup_set_offline(gdev);
148 else
149 ret = -EINVAL;
150out:
151 module_put(gdrv->driver.owner);
152 return (ret == 0) ? count : ret;
153}
154
155static ssize_t ccwgroup_online_show(struct device *dev, 93static ssize_t ccwgroup_online_show(struct device *dev,
156 struct device_attribute *attr, 94 struct device_attribute *attr,
157 char *buf) 95 char *buf);
158{
159 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
160 int online;
161
162 online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
163
164 return scnprintf(buf, PAGE_SIZE, "%d\n", online);
165}
166
167/* 96/*
168 * Provide an 'ungroup' attribute so the user can remove group devices no 97 * Provide an 'ungroup' attribute so the user can remove group devices no
169 * longer needed or accidentally created. Saves memory :) 98 * longer needed or accidentally created. Saves memory :)
@@ -181,13 +110,14 @@ static void ccwgroup_ungroup_callback(struct device *dev)
181 mutex_unlock(&gdev->reg_mutex); 110 mutex_unlock(&gdev->reg_mutex);
182} 111}
183 112
184static ssize_t ccwgroup_ungroup_store(struct device *dev, 113static ssize_t
185 struct device_attribute *attr, 114ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
186 const char *buf, size_t count)
187{ 115{
188 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 116 struct ccwgroup_device *gdev;
189 int rc; 117 int rc;
190 118
119 gdev = to_ccwgroupdev(dev);
120
191 /* Prevent concurrent online/offline processing and ungrouping. */ 121 /* Prevent concurrent online/offline processing and ungrouping. */
192 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) 122 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
193 return -EAGAIN; 123 return -EAGAIN;
@@ -208,6 +138,7 @@ out:
208 } 138 }
209 return count; 139 return count;
210} 140}
141
211static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); 142static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
212static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); 143static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
213 144
@@ -224,19 +155,21 @@ static const struct attribute_group *ccwgroup_attr_groups[] = {
224 NULL, 155 NULL,
225}; 156};
226 157
227static void ccwgroup_release(struct device *dev) 158static void
159ccwgroup_release (struct device *dev)
228{ 160{
229 kfree(to_ccwgroupdev(dev)); 161 kfree(to_ccwgroupdev(dev));
230} 162}
231 163
232static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 164static int
165__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
233{ 166{
234 char str[8]; 167 char str[8];
235 int i, rc; 168 int i, rc;
236 169
237 for (i = 0; i < gdev->count; i++) { 170 for (i = 0; i < gdev->count; i++) {
238 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, 171 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
239 &gdev->dev.kobj, "group_device"); 172 "group_device");
240 if (rc) { 173 if (rc) {
241 for (--i; i >= 0; i--) 174 for (--i; i >= 0; i--)
242 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, 175 sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
@@ -246,8 +179,8 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
246 } 179 }
247 for (i = 0; i < gdev->count; i++) { 180 for (i = 0; i < gdev->count; i++) {
248 sprintf(str, "cdev%d", i); 181 sprintf(str, "cdev%d", i);
249 rc = sysfs_create_link(&gdev->dev.kobj, 182 rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
250 &gdev->cdev[i]->dev.kobj, str); 183 str);
251 if (rc) { 184 if (rc) {
252 for (--i; i >= 0; i--) { 185 for (--i; i >= 0; i--) {
253 sprintf(str, "cdev%d", i); 186 sprintf(str, "cdev%d", i);
@@ -262,10 +195,9 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
262 return 0; 195 return 0;
263} 196}
264 197
265static int __get_next_id(const char **buf, struct ccw_dev_id *id) 198static int __get_next_bus_id(const char **buf, char *bus_id)
266{ 199{
267 unsigned int cssid, ssid, devno; 200 int rc, len;
268 int ret = 0, len;
269 char *start, *end; 201 char *start, *end;
270 202
271 start = (char *)*buf; 203 start = (char *)*buf;
@@ -280,40 +212,49 @@ static int __get_next_id(const char **buf, struct ccw_dev_id *id)
280 len = end - start + 1; 212 len = end - start + 1;
281 end++; 213 end++;
282 } 214 }
283 if (len <= CCW_BUS_ID_SIZE) { 215 if (len < CCW_BUS_ID_SIZE) {
284 if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3) 216 strlcpy(bus_id, start, len);
285 ret = -EINVAL; 217 rc = 0;
286 } else 218 } else
287 ret = -EINVAL; 219 rc = -EINVAL;
288
289 if (!ret) {
290 id->ssid = ssid;
291 id->devno = devno;
292 }
293 *buf = end; 220 *buf = end;
294 return ret; 221 return rc;
222}
223
224static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
225{
226 int cssid, ssid, devno;
227
228 /* Must be of form %x.%x.%04x */
229 if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
230 return 0;
231 return 1;
295} 232}
296 233
297/** 234/**
298 * ccwgroup_create_dev() - create and register a ccw group device 235 * ccwgroup_create_from_string() - create and register a ccw group device
299 * @parent: parent device for the new device 236 * @root: parent device for the new device
300 * @gdrv: driver for the new group device 237 * @creator_id: identifier of creating driver
238 * @cdrv: ccw driver of slave devices
301 * @num_devices: number of slave devices 239 * @num_devices: number of slave devices
302 * @buf: buffer containing comma separated bus ids of slave devices 240 * @buf: buffer containing comma separated bus ids of slave devices
303 * 241 *
304 * Create and register a new ccw group device as a child of @parent. Slave 242 * Create and register a new ccw group device as a child of @root. Slave
305 * devices are obtained from the list of bus ids given in @buf. 243 * devices are obtained from the list of bus ids given in @buf and must all
244 * belong to @cdrv.
306 * Returns: 245 * Returns:
307 * %0 on success and an error code on failure. 246 * %0 on success and an error code on failure.
308 * Context: 247 * Context:
309 * non-atomic 248 * non-atomic
310 */ 249 */
311int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, 250int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
312 int num_devices, const char *buf) 251 struct ccw_driver *cdrv, int num_devices,
252 const char *buf)
313{ 253{
314 struct ccwgroup_device *gdev; 254 struct ccwgroup_device *gdev;
315 struct ccw_dev_id dev_id;
316 int rc, i; 255 int rc, i;
256 char tmp_bus_id[CCW_BUS_ID_SIZE];
257 const char *curr_buf;
317 258
318 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), 259 gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
319 GFP_KERNEL); 260 GFP_KERNEL);
@@ -323,24 +264,29 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
323 atomic_set(&gdev->onoff, 0); 264 atomic_set(&gdev->onoff, 0);
324 mutex_init(&gdev->reg_mutex); 265 mutex_init(&gdev->reg_mutex);
325 mutex_lock(&gdev->reg_mutex); 266 mutex_lock(&gdev->reg_mutex);
267 gdev->creator_id = creator_id;
326 gdev->count = num_devices; 268 gdev->count = num_devices;
327 gdev->dev.bus = &ccwgroup_bus_type; 269 gdev->dev.bus = &ccwgroup_bus_type;
328 gdev->dev.parent = parent; 270 gdev->dev.parent = root;
329 gdev->dev.release = ccwgroup_release; 271 gdev->dev.release = ccwgroup_release;
330 device_initialize(&gdev->dev); 272 device_initialize(&gdev->dev);
331 273
332 for (i = 0; i < num_devices && buf; i++) { 274 curr_buf = buf;
333 rc = __get_next_id(&buf, &dev_id); 275 for (i = 0; i < num_devices && curr_buf; i++) {
276 rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
334 if (rc != 0) 277 if (rc != 0)
335 goto error; 278 goto error;
336 gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id); 279 if (!__is_valid_bus_id(tmp_bus_id)) {
280 rc = -EINVAL;
281 goto error;
282 }
283 gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
337 /* 284 /*
338 * All devices have to be of the same type in 285 * All devices have to be of the same type in
339 * order to be grouped. 286 * order to be grouped.
340 */ 287 */
341 if (!gdev->cdev[i] || !gdev->cdev[i]->drv || 288 if (!gdev->cdev[i]
342 gdev->cdev[i]->drv != gdev->cdev[0]->drv || 289 || gdev->cdev[i]->id.driver_info !=
343 gdev->cdev[i]->id.driver_info !=
344 gdev->cdev[0]->id.driver_info) { 290 gdev->cdev[0]->id.driver_info) {
345 rc = -EINVAL; 291 rc = -EINVAL;
346 goto error; 292 goto error;
@@ -356,35 +302,29 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
356 spin_unlock_irq(gdev->cdev[i]->ccwlock); 302 spin_unlock_irq(gdev->cdev[i]->ccwlock);
357 } 303 }
358 /* Check for sufficient number of bus ids. */ 304 /* Check for sufficient number of bus ids. */
359 if (i < num_devices) { 305 if (i < num_devices && !curr_buf) {
360 rc = -EINVAL; 306 rc = -EINVAL;
361 goto error; 307 goto error;
362 } 308 }
363 /* Check for trailing stuff. */ 309 /* Check for trailing stuff. */
364 if (i == num_devices && strlen(buf) > 0) { 310 if (i == num_devices && strlen(curr_buf) > 0) {
365 rc = -EINVAL; 311 rc = -EINVAL;
366 goto error; 312 goto error;
367 } 313 }
368 314
369 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); 315 dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
370 gdev->dev.groups = ccwgroup_attr_groups; 316 gdev->dev.groups = ccwgroup_attr_groups;
371
372 if (gdrv) {
373 gdev->dev.driver = &gdrv->driver;
374 rc = gdrv->setup ? gdrv->setup(gdev) : 0;
375 if (rc)
376 goto error;
377 }
378 rc = device_add(&gdev->dev); 317 rc = device_add(&gdev->dev);
379 if (rc) 318 if (rc)
380 goto error; 319 goto error;
320 get_device(&gdev->dev);
381 rc = __ccwgroup_create_symlinks(gdev); 321 rc = __ccwgroup_create_symlinks(gdev);
382 if (rc) { 322 if (!rc) {
383 device_del(&gdev->dev); 323 mutex_unlock(&gdev->reg_mutex);
384 goto error; 324 put_device(&gdev->dev);
325 return 0;
385 } 326 }
386 mutex_unlock(&gdev->reg_mutex); 327 device_unregister(&gdev->dev);
387 return 0;
388error: 328error:
389 for (i = 0; i < num_devices; i++) 329 for (i = 0; i < num_devices; i++)
390 if (gdev->cdev[i]) { 330 if (gdev->cdev[i]) {
@@ -399,18 +339,10 @@ error:
399 put_device(&gdev->dev); 339 put_device(&gdev->dev);
400 return rc; 340 return rc;
401} 341}
402EXPORT_SYMBOL(ccwgroup_create_dev); 342EXPORT_SYMBOL(ccwgroup_create_from_string);
403 343
404static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action, 344static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
405 void *data) 345 void *data);
406{
407 struct device *dev = data;
408
409 if (action == BUS_NOTIFY_UNBIND_DRIVER)
410 device_schedule_callback(dev, ccwgroup_ungroup_callback);
411
412 return NOTIFY_OK;
413}
414 346
415static struct notifier_block ccwgroup_nb = { 347static struct notifier_block ccwgroup_nb = {
416 .notifier_call = ccwgroup_notifier 348 .notifier_call = ccwgroup_notifier
@@ -442,13 +374,128 @@ module_exit(cleanup_ccwgroup);
442 374
443/************************** driver stuff ******************************/ 375/************************** driver stuff ******************************/
444 376
445static int ccwgroup_remove(struct device *dev) 377static int
378ccwgroup_set_online(struct ccwgroup_device *gdev)
446{ 379{
447 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 380 struct ccwgroup_driver *gdrv;
448 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); 381 int ret;
382
383 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
384 return -EAGAIN;
385 if (gdev->state == CCWGROUP_ONLINE) {
386 ret = 0;
387 goto out;
388 }
389 if (!gdev->dev.driver) {
390 ret = -EINVAL;
391 goto out;
392 }
393 gdrv = to_ccwgroupdrv (gdev->dev.driver);
394 if ((ret = gdrv->set_online ? gdrv->set_online(gdev) : 0))
395 goto out;
396
397 gdev->state = CCWGROUP_ONLINE;
398 out:
399 atomic_set(&gdev->onoff, 0);
400 return ret;
401}
402
403static int
404ccwgroup_set_offline(struct ccwgroup_device *gdev)
405{
406 struct ccwgroup_driver *gdrv;
407 int ret;
408
409 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
410 return -EAGAIN;
411 if (gdev->state == CCWGROUP_OFFLINE) {
412 ret = 0;
413 goto out;
414 }
415 if (!gdev->dev.driver) {
416 ret = -EINVAL;
417 goto out;
418 }
419 gdrv = to_ccwgroupdrv (gdev->dev.driver);
420 if ((ret = gdrv->set_offline ? gdrv->set_offline(gdev) : 0))
421 goto out;
422
423 gdev->state = CCWGROUP_OFFLINE;
424 out:
425 atomic_set(&gdev->onoff, 0);
426 return ret;
427}
428
429static ssize_t
430ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
431{
432 struct ccwgroup_device *gdev;
433 struct ccwgroup_driver *gdrv;
434 unsigned long value;
435 int ret;
436
437 if (!dev->driver)
438 return -EINVAL;
439
440 gdev = to_ccwgroupdev(dev);
441 gdrv = to_ccwgroupdrv(dev->driver);
442
443 if (!try_module_get(gdrv->driver.owner))
444 return -EINVAL;
445
446 ret = strict_strtoul(buf, 0, &value);
447 if (ret)
448 goto out;
449
450 if (value == 1)
451 ret = ccwgroup_set_online(gdev);
452 else if (value == 0)
453 ret = ccwgroup_set_offline(gdev);
454 else
455 ret = -EINVAL;
456out:
457 module_put(gdrv->driver.owner);
458 return (ret == 0) ? count : ret;
459}
460
461static ssize_t
462ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
463{
464 int online;
465
466 online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);
467
468 return sprintf(buf, online ? "1\n" : "0\n");
469}
470
471static int
472ccwgroup_probe (struct device *dev)
473{
474 struct ccwgroup_device *gdev;
475 struct ccwgroup_driver *gdrv;
476
477 int ret;
478
479 gdev = to_ccwgroupdev(dev);
480 gdrv = to_ccwgroupdrv(dev->driver);
481
482 ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
483
484 return ret;
485}
486
487static int
488ccwgroup_remove (struct device *dev)
489{
490 struct ccwgroup_device *gdev;
491 struct ccwgroup_driver *gdrv;
449 492
450 if (!dev->driver) 493 if (!dev->driver)
451 return 0; 494 return 0;
495
496 gdev = to_ccwgroupdev(dev);
497 gdrv = to_ccwgroupdrv(dev->driver);
498
452 if (gdrv->remove) 499 if (gdrv->remove)
453 gdrv->remove(gdev); 500 gdrv->remove(gdev);
454 501
@@ -457,11 +504,15 @@ static int ccwgroup_remove(struct device *dev)
457 504
458static void ccwgroup_shutdown(struct device *dev) 505static void ccwgroup_shutdown(struct device *dev)
459{ 506{
460 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 507 struct ccwgroup_device *gdev;
461 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); 508 struct ccwgroup_driver *gdrv;
462 509
463 if (!dev->driver) 510 if (!dev->driver)
464 return; 511 return;
512
513 gdev = to_ccwgroupdev(dev);
514 gdrv = to_ccwgroupdrv(dev->driver);
515
465 if (gdrv->shutdown) 516 if (gdrv->shutdown)
466 gdrv->shutdown(gdev); 517 gdrv->shutdown(gdev);
467} 518}
@@ -536,11 +587,27 @@ static const struct dev_pm_ops ccwgroup_pm_ops = {
536 587
537static struct bus_type ccwgroup_bus_type = { 588static struct bus_type ccwgroup_bus_type = {
538 .name = "ccwgroup", 589 .name = "ccwgroup",
590 .match = ccwgroup_bus_match,
591 .uevent = ccwgroup_uevent,
592 .probe = ccwgroup_probe,
539 .remove = ccwgroup_remove, 593 .remove = ccwgroup_remove,
540 .shutdown = ccwgroup_shutdown, 594 .shutdown = ccwgroup_shutdown,
541 .pm = &ccwgroup_pm_ops, 595 .pm = &ccwgroup_pm_ops,
542}; 596};
543 597
598
599static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
600 void *data)
601{
602 struct device *dev = data;
603
604 if (action == BUS_NOTIFY_UNBIND_DRIVER)
605 device_schedule_callback(dev, ccwgroup_ungroup_callback);
606
607 return NOTIFY_OK;
608}
609
610
544/** 611/**
545 * ccwgroup_driver_register() - register a ccw group driver 612 * ccwgroup_driver_register() - register a ccw group driver
546 * @cdriver: driver to be registered 613 * @cdriver: driver to be registered
@@ -554,9 +621,9 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
554 621
555 return driver_register(&cdriver->driver); 622 return driver_register(&cdriver->driver);
556} 623}
557EXPORT_SYMBOL(ccwgroup_driver_register);
558 624
559static int __ccwgroup_match_all(struct device *dev, void *data) 625static int
626__ccwgroup_match_all(struct device *dev, void *data)
560{ 627{
561 return 1; 628 return 1;
562} 629}
@@ -572,6 +639,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
572 struct device *dev; 639 struct device *dev;
573 640
574 /* We don't want ccwgroup devices to live longer than their driver. */ 641 /* We don't want ccwgroup devices to live longer than their driver. */
642 get_driver(&cdriver->driver);
575 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL, 643 while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
576 __ccwgroup_match_all))) { 644 __ccwgroup_match_all))) {
577 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 645 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
@@ -583,9 +651,9 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
583 mutex_unlock(&gdev->reg_mutex); 651 mutex_unlock(&gdev->reg_mutex);
584 put_device(dev); 652 put_device(dev);
585 } 653 }
654 put_driver(&cdriver->driver);
586 driver_unregister(&cdriver->driver); 655 driver_unregister(&cdriver->driver);
587} 656}
588EXPORT_SYMBOL(ccwgroup_driver_unregister);
589 657
590/** 658/**
591 * ccwgroup_probe_ccwdev() - probe function for slave devices 659 * ccwgroup_probe_ccwdev() - probe function for slave devices
@@ -600,7 +668,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
600{ 668{
601 return 0; 669 return 0;
602} 670}
603EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
604 671
605/** 672/**
606 * ccwgroup_remove_ccwdev() - remove function for slave devices 673 * ccwgroup_remove_ccwdev() - remove function for slave devices
@@ -637,5 +704,9 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
637 /* Release ccwgroup device reference for local processing. */ 704 /* Release ccwgroup device reference for local processing. */
638 put_device(&gdev->dev); 705 put_device(&gdev->dev);
639} 706}
640EXPORT_SYMBOL(ccwgroup_remove_ccwdev); 707
641MODULE_LICENSE("GPL"); 708MODULE_LICENSE("GPL");
709EXPORT_SYMBOL(ccwgroup_driver_register);
710EXPORT_SYMBOL(ccwgroup_driver_unregister);
711EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
712EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
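Both versions of ccwgroup.c serialize online/offline and ungroup processing with the same lock-free guard: an atomic flag taken via cmpxchg acts as a try-lock around the state transition. A reduced sketch of that guard; do_transition() is a hypothetical stand-in for the set_online/set_offline callback.

#include <linux/atomic.h>
#include <linux/errno.h>

extern int do_transition(void);		/* hypothetical state change */

static atomic_t onoff = ATOMIC_INIT(0);

static int guarded_transition(void)
{
	int ret;

	if (atomic_cmpxchg(&onoff, 0, 1) != 0)
		return -EAGAIN;		/* another caller is mid-transition */
	ret = do_transition();
	atomic_set(&onoff, 0);		/* release the guard */
	return ret;
}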
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 5156264d0c7..d15f8b4d78b 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -1,13 +1,10 @@
1/* 1/*
2 * Handling of internal CCW device requests. 2 * Handling of internal CCW device requests.
3 * 3 *
4 * Copyright IBM Corp. 2009, 2011 4 * Copyright IBM Corp. 2009
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */ 6 */
7 7
8#define KMSG_COMPONENT "cio"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/types.h> 8#include <linux/types.h>
12#include <linux/err.h> 9#include <linux/err.h>
13#include <asm/ccwdev.h> 10#include <asm/ccwdev.h>
@@ -326,21 +323,7 @@ void ccw_request_timeout(struct ccw_device *cdev)
326{ 323{
327 struct subchannel *sch = to_subchannel(cdev->dev.parent); 324 struct subchannel *sch = to_subchannel(cdev->dev.parent);
328 struct ccw_request *req = &cdev->private->req; 325 struct ccw_request *req = &cdev->private->req;
329 int rc = -ENODEV, chp; 326 int rc;
330
331 if (cio_update_schib(sch))
332 goto err;
333
334 for (chp = 0; chp < 8; chp++) {
335 if ((0x80 >> chp) & sch->schib.pmcw.lpum)
336 pr_warning("%s: No interrupt was received within %lus "
337 "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
338 dev_name(&cdev->dev), req->timeout / HZ,
339 scsw_cstat(&sch->schib.scsw),
340 scsw_dstat(&sch->schib.scsw),
341 sch->schid.cssid,
342 sch->schib.pmcw.chpid[chp]);
343 }
344 327
345 if (!ccwreq_next_path(cdev)) { 328 if (!ccwreq_next_path(cdev)) {
346 /* set the final return code for this request */ 329 /* set the final return code for this request */
@@ -359,7 +342,7 @@ err:
359 * ccw_request_notoper - notoper handler for I/O request procedure 342 * ccw_request_notoper - notoper handler for I/O request procedure
360 * @cdev: ccw device 343 * @cdev: ccw device
361 * 344 *
362 * Handle notoper during I/O request procedure. 345 * Handle timeout during I/O request procedure.
363 */ 346 */
364void ccw_request_notoper(struct ccw_device *cdev) 347void ccw_request_notoper(struct ccw_device *cdev)
365{ 348{
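The warning removed from ccw_request_timeout() above walks the last-path-used mask (lpum), an 8-bit field with one bit per channel path where bit 0x80 denotes path 0. The bit walk in isolation, as a small sketch:

#include <stdio.h>

static void report_paths(unsigned char lpum)
{
	int chp;

	for (chp = 0; chp < 8; chp++)
		if ((0x80 >> chp) & lpum)	/* 0x80 = path 0, 0x01 = path 7 */
			printf("channel path %d saw the timeout\n", chp);
}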
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 50ad5fdd815..2d32233943a 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 1999, 2010 2 * drivers/s390/cio/chp.c
3 *
4 * Copyright IBM Corp. 1999,2010
3 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
4 * Arnd Bergmann (arndb@de.ibm.com) 6 * Arnd Bergmann (arndb@de.ibm.com)
5 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -8,8 +10,6 @@
8#include <linux/bug.h> 10#include <linux/bug.h>
9#include <linux/workqueue.h> 11#include <linux/workqueue.h>
10#include <linux/spinlock.h> 12#include <linux/spinlock.h>
11#include <linux/export.h>
12#include <linux/sched.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/jiffies.h> 14#include <linux/jiffies.h>
15#include <linux/wait.h> 15#include <linux/wait.h>
@@ -360,13 +360,10 @@ static struct attribute *chp_attrs[] = {
360 &dev_attr_shared.attr, 360 &dev_attr_shared.attr,
361 NULL, 361 NULL,
362}; 362};
363
363static struct attribute_group chp_attr_group = { 364static struct attribute_group chp_attr_group = {
364 .attrs = chp_attrs, 365 .attrs = chp_attrs,
365}; 366};
366static const struct attribute_group *chp_attr_groups[] = {
367 &chp_attr_group,
368 NULL,
369};
370 367
371static void chp_release(struct device *dev) 368static void chp_release(struct device *dev)
372{ 369{
@@ -398,7 +395,6 @@ int chp_new(struct chp_id chpid)
398 chp->chpid = chpid; 395 chp->chpid = chpid;
399 chp->state = 1; 396 chp->state = 1;
400 chp->dev.parent = &channel_subsystems[chpid.cssid]->device; 397 chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
401 chp->dev.groups = chp_attr_groups;
402 chp->dev.release = chp_release; 398 chp->dev.release = chp_release;
403 mutex_init(&chp->lock); 399 mutex_init(&chp->lock);
404 400
@@ -428,10 +424,16 @@ int chp_new(struct chp_id chpid)
428 put_device(&chp->dev); 424 put_device(&chp->dev);
429 goto out; 425 goto out;
430 } 426 }
427 ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
428 if (ret) {
429 device_unregister(&chp->dev);
430 goto out;
431 }
431 mutex_lock(&channel_subsystems[chpid.cssid]->mutex); 432 mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
432 if (channel_subsystems[chpid.cssid]->cm_enabled) { 433 if (channel_subsystems[chpid.cssid]->cm_enabled) {
433 ret = chp_add_cmg_attr(chp); 434 ret = chp_add_cmg_attr(chp);
434 if (ret) { 435 if (ret) {
436 sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
435 device_unregister(&chp->dev); 437 device_unregister(&chp->dev);
436 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex); 438 mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
437 goto out; 439 goto out;
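The chp.c hunks trade dev.groups (attributes created automatically by the driver core) for an explicit sysfs_create_group() after registration, which forces every later error path to remove the group or unregister the device. A sketch of that explicit style and its unwind order, assuming attr_group is populated elsewhere:

#include <linux/device.h>
#include <linux/sysfs.h>

extern struct attribute_group attr_group;	/* assumed to exist */

static int register_with_group(struct device *dev)
{
	int ret;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);	/* device_register() failure rule */
		return ret;
	}
	ret = sysfs_create_group(&dev->kobj, &attr_group);
	if (ret)
		device_unregister(dev);	/* unwind in reverse order */
	return ret;
}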
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index e1399dbee83..12b4903d6fe 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2007, 2010 2 * drivers/s390/cio/chp.h
3 *
4 * Copyright IBM Corp. 2007,2010
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 6 */
5 7
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 68e80e2734a..75c3f1f8fd4 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/cio/chsc.c
2 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
3 * 4 *
4 * Copyright IBM Corp. 1999,2012 5 * Copyright IBM Corp. 1999,2010
5 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -14,7 +15,6 @@
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/init.h> 16#include <linux/init.h>
16#include <linux/device.h> 17#include <linux/device.h>
17#include <linux/pci.h>
18 18
19#include <asm/cio.h> 19#include <asm/cio.h>
20#include <asm/chpid.h> 20#include <asm/chpid.h>
@@ -53,11 +53,6 @@ int chsc_error_from_response(int response)
53 return -EINVAL; 53 return -EINVAL;
54 case 0x0004: 54 case 0x0004:
55 return -EOPNOTSUPP; 55 return -EOPNOTSUPP;
56 case 0x000b:
57 return -EBUSY;
58 case 0x0100:
59 case 0x0102:
60 return -ENOMEM;
61 default: 56 default:
62 return -EIO; 57 return -EIO;
63 } 58 }
@@ -261,45 +256,26 @@ __get_chpid_from_lir(void *data)
261 return (u16) (lir->indesc[0]&0x000000ff); 256 return (u16) (lir->indesc[0]&0x000000ff);
262} 257}
263 258
264struct chsc_sei_nt0_area { 259struct chsc_sei_area {
265 u8 flags;
266 u8 vf; /* validity flags */
267 u8 rs; /* reporting source */
268 u8 cc; /* content code */
269 u16 fla; /* full link address */
270 u16 rsid; /* reporting source id */
271 u32 reserved1;
272 u32 reserved2;
273 /* ccdf has to be big enough for a link-incident record */
274 u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
275} __packed;
276
277struct chsc_sei_nt2_area {
278 u8 flags; /* p and v bit */
279 u8 reserved1;
280 u8 reserved2;
281 u8 cc; /* content code */
282 u32 reserved3[13];
283 u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
284} __packed;
285
286#define CHSC_SEI_NT0 0ULL
287#define CHSC_SEI_NT2 (1ULL << 61)
288
289struct chsc_sei {
290 struct chsc_header request; 260 struct chsc_header request;
291 u32 reserved1; 261 u32 reserved1;
292 u64 ntsm; /* notification type mask */
293 struct chsc_header response;
294 u32 reserved2; 262 u32 reserved2;
295 union { 263 u32 reserved3;
296 struct chsc_sei_nt0_area nt0_area; 264 struct chsc_header response;
297 struct chsc_sei_nt2_area nt2_area; 265 u32 reserved4;
298 u8 nt_area[PAGE_SIZE - 24]; 266 u8 flags;
299 } u; 267 u8 vf; /* validity flags */
300} __packed; 268 u8 rs; /* reporting source */
301 269 u8 cc; /* content code */
302static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) 270 u16 fla; /* full link address */
271 u16 rsid; /* reporting source id */
272 u32 reserved5;
273 u32 reserved6;
274 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
275 /* ccdf has to be big enough for a link-incident record */
276} __attribute__ ((packed));
277
278static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
303{ 279{
304 struct chp_id chpid; 280 struct chp_id chpid;
305 int id; 281 int id;
@@ -318,7 +294,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
318 } 294 }
319} 295}
320 296
321static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 297static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
322{ 298{
323 struct chp_link link; 299 struct chp_link link;
324 struct chp_id chpid; 300 struct chp_id chpid;
@@ -350,7 +326,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
350 s390_process_res_acc(&link); 326 s390_process_res_acc(&link);
351} 327}
352 328
353static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) 329static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
354{ 330{
355 struct channel_path *chp; 331 struct channel_path *chp;
356 struct chp_id chpid; 332 struct chp_id chpid;
@@ -386,7 +362,7 @@ struct chp_config_data {
386 u8 pc; 362 u8 pc;
387}; 363};
388 364
389static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) 365static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
390{ 366{
391 struct chp_config_data *data; 367 struct chp_config_data *data;
392 struct chp_id chpid; 368 struct chp_id chpid;
@@ -418,40 +394,13 @@ static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
418 } 394 }
419} 395}
420 396
421static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) 397static void chsc_process_sei(struct chsc_sei_area *sei_area)
422{
423 int ret;
424
425 CIO_CRW_EVENT(4, "chsc: scm change notification\n");
426 if (sei_area->rs != 7)
427 return;
428
429 ret = scm_update_information();
430 if (ret)
431 CIO_CRW_EVENT(0, "chsc: updating change notification"
432 " failed (rc=%d).\n", ret);
433}
434
435static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
436{ 398{
437#ifdef CONFIG_PCI 399 /* Check if we might have lost some information. */
438 switch (sei_area->cc) { 400 if (sei_area->flags & 0x40) {
439 case 1: 401 CIO_CRW_EVENT(2, "chsc: event overflow\n");
440 zpci_event_error(sei_area->ccdf); 402 css_schedule_eval_all();
441 break;
442 case 2:
443 zpci_event_availability(sei_area->ccdf);
444 break;
445 default:
446 CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
447 sei_area->cc);
448 break;
449 } 403 }
450#endif
451}
452
453static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
454{
455 /* which kind of information was stored? */ 404 /* which kind of information was stored? */
456 switch (sei_area->cc) { 405 switch (sei_area->cc) {
457 case 1: /* link incident*/ 406 case 1: /* link incident*/
@@ -466,9 +415,6 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
466 case 8: /* channel-path-configuration notification */ 415 case 8: /* channel-path-configuration notification */
467 chsc_process_sei_chp_config(sei_area); 416 chsc_process_sei_chp_config(sei_area);
468 break; 417 break;
469 case 12: /* scm change notification */
470 chsc_process_sei_scm_change(sei_area);
471 break;
472 default: /* other stuff */ 418 default: /* other stuff */
473 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", 419 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
474 sei_area->cc); 420 sei_area->cc);
@@ -476,51 +422,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
476 } 422 }
477} 423}
478 424
479static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
480{
481 do {
482 memset(sei, 0, sizeof(*sei));
483 sei->request.length = 0x0010;
484 sei->request.code = 0x000e;
485 sei->ntsm = ntsm;
486
487 if (chsc(sei))
488 break;
489
490 if (sei->response.code == 0x0001) {
491 CIO_CRW_EVENT(2, "chsc: sei successful\n");
492
493 /* Check if we might have lost some information. */
494 if (sei->u.nt0_area.flags & 0x40) {
495 CIO_CRW_EVENT(2, "chsc: event overflow\n");
496 css_schedule_eval_all();
497 }
498
499 switch (sei->ntsm) {
500 case CHSC_SEI_NT0:
501 chsc_process_sei_nt0(&sei->u.nt0_area);
502 return 1;
503 case CHSC_SEI_NT2:
504 chsc_process_sei_nt2(&sei->u.nt2_area);
505 return 1;
506 default:
507 CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
508 sei->ntsm);
509 return 0;
510 }
511 } else {
512 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
513 sei->response.code);
514 break;
515 }
516 } while (sei->u.nt0_area.flags & 0x80);
517
518 return 0;
519}
520
521static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 425static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
522{ 426{
523 struct chsc_sei *sei; 427 struct chsc_sei_area *sei_area;
524 428
525 if (overflow) { 429 if (overflow) {
526 css_schedule_eval_all(); 430 css_schedule_eval_all();
@@ -534,18 +438,25 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
534 return; 438 return;
535 /* Access to sei_page is serialized through machine check handler 439 /* Access to sei_page is serialized through machine check handler
536 * thread, so no need for locking. */ 440 * thread, so no need for locking. */
537 sei = sei_page; 441 sei_area = sei_page;
538 442
539 CIO_TRACE_EVENT(2, "prcss"); 443 CIO_TRACE_EVENT(2, "prcss");
444 do {
445 memset(sei_area, 0, sizeof(*sei_area));
446 sei_area->request.length = 0x0010;
447 sei_area->request.code = 0x000e;
448 if (chsc(sei_area))
449 break;
540 450
541 /* 451 if (sei_area->response.code == 0x0001) {
542 * The ntsm does not allow to select NT0 and NT2 together. We need to 452 CIO_CRW_EVENT(4, "chsc: sei successful\n");
543 * first check for NT2, than additionally for NT0... 453 chsc_process_sei(sei_area);
544 */ 454 } else {
545#ifdef CONFIG_PCI 455 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
546 if (!__chsc_process_crw(sei, CHSC_SEI_NT2)) 456 sei_area->response.code);
547#endif 457 break;
548 __chsc_process_crw(sei, CHSC_SEI_NT0); 458 }
459 } while (sei_area->flags & 0x80);
549} 460}
550 461
551void chsc_chp_online(struct chp_id chpid) 462void chsc_chp_online(struct chp_id chpid)
@@ -618,7 +529,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
618int chsc_chp_vary(struct chp_id chpid, int on) 529int chsc_chp_vary(struct chp_id chpid, int on)
619{ 530{
620 struct channel_path *chp = chpid_to_chp(chpid); 531 struct channel_path *chp = chpid_to_chp(chpid);
532 struct chp_link link;
621 533
534 memset(&link, 0, sizeof(struct chp_link));
535 link.chpid = chpid;
622 /* Wait until previous actions have settled. */ 536 /* Wait until previous actions have settled. */
623 css_wait_for_slow_path(); 537 css_wait_for_slow_path();
624 /* 538 /*
@@ -628,10 +542,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
628 /* Try to update the channel path descriptor. */ 542 /* Try to update the channel path descriptor. */
629 chsc_determine_base_channel_path_desc(chpid, &chp->desc); 543 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
630 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 544 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
631 __s390_vary_chpid_on, &chpid); 545 __s390_vary_chpid_on, &link);
632 } else 546 } else
633 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 547 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
634 NULL, &chpid); 548 NULL, &link);
635 549
636 return 0; 550 return 0;
637} 551}
@@ -1137,33 +1051,3 @@ out:
1137 return rc; 1051 return rc;
1138} 1052}
1139EXPORT_SYMBOL_GPL(chsc_siosl); 1053EXPORT_SYMBOL_GPL(chsc_siosl);
1140
1141/**
1142 * chsc_scm_info() - store SCM information (SSI)
1143 * @scm_area: request and response block for SSI
1144 * @token: continuation token
1145 *
1146 * Returns 0 on success.
1147 */
1148int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
1149{
1150 int ccode, ret;
1151
1152 memset(scm_area, 0, sizeof(*scm_area));
1153 scm_area->request.length = 0x0020;
1154 scm_area->request.code = 0x004C;
1155 scm_area->reqtok = token;
1156
1157 ccode = chsc(scm_area);
1158 if (ccode > 0) {
1159 ret = (ccode == 3) ? -ENODEV : -EBUSY;
1160 goto out;
1161 }
1162 ret = chsc_error_from_response(scm_area->response.code);
1163 if (ret != 0)
1164 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
1165 scm_area->response.code);
1166out:
1167 return ret;
1168}
1169EXPORT_SYMBOL_GPL(chsc_scm_info);
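Both shapes of the store-event-information handler above share one control flow: keep issuing the CHSC while the "more events pending" flag (0x80) is set, and schedule a full re-evaluation whenever the overflow flag (0x40) reports lost events. Reduced to a sketch; fetch_event() is a hypothetical stand-in for the chsc() call that fills the sei area.

#define EV_OVERFLOW 0x40	/* events were lost             */
#define EV_PENDING  0x80	/* more events are still queued */

extern int fetch_event(unsigned char *flags);	/* assumed accessor */
extern void handle_event(void);
extern void rescan_all(void);

static void drain_events(void)
{
	unsigned char flags;

	do {
		if (fetch_event(&flags))
			break;		/* instruction or response failure */
		if (flags & EV_OVERFLOW)
			rescan_all();	/* we may have missed something */
		handle_event();
	} while (flags & EV_PENDING);
}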
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 662dab4b93e..3f15b2aaeae 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -3,7 +3,6 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <asm/css_chars.h>
7#include <asm/chpid.h> 6#include <asm/chpid.h>
8#include <asm/chsc.h> 7#include <asm/chsc.h>
9#include <asm/schid.h> 8#include <asm/schid.h>
@@ -119,46 +118,4 @@ int chsc_error_from_response(int response);
119 118
120int chsc_siosl(struct subchannel_id schid); 119int chsc_siosl(struct subchannel_id schid);
121 120
122/* Functions and definitions to query storage-class memory. */
123struct sale {
124 u64 sa;
125 u32 p:4;
126 u32 op_state:4;
127 u32 data_state:4;
128 u32 rank:4;
129 u32 r:1;
130 u32:7;
131 u32 rid:8;
132 u32:32;
133} __packed;
134
135struct chsc_scm_info {
136 struct chsc_header request;
137 u32:32;
138 u64 reqtok;
139 u32 reserved1[4];
140 struct chsc_header response;
141 u64:56;
142 u8 rq;
143 u32 mbc;
144 u64 msa;
145 u16 is;
146 u16 mmc;
147 u32 mci;
148 u64 nr_scm_ini;
149 u64 nr_scm_unini;
150 u32 reserved2[10];
151 u64 restok;
152 struct sale scmal[248];
153} __packed;
154
155int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
156
157#ifdef CONFIG_SCM_BUS
158int scm_update_information(void);
159#else /* CONFIG_SCM_BUS */
160#define scm_update_information() 0
161#endif /* CONFIG_SCM_BUS */
162
163
164#endif 121#endif
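The removed SCM structures follow the usual convention for CHSC control blocks: exact-width integer fields laid out packed so the C struct matches the hardware block byte-for-byte. A reduced example of the convention; the field names here are illustrative only.

#include <linux/types.h>

struct example_chsc_block {
	u16 length;	/* total length of the request block */
	u16 code;	/* command code                      */
	u32 reserved;
	u64 token;	/* continuation token, like reqtok   */
} __attribute__((packed));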
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index facdf809113..e950f1ad4dd 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -1,19 +1,17 @@
1/* 1/*
2 * Driver for s390 chsc subchannels 2 * Driver for s390 chsc subchannels
3 * 3 *
4 * Copyright IBM Corp. 2008, 2011 4 * Copyright IBM Corp. 2008, 2009
5 * 5 *
6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
7 * 7 *
8 */ 8 */
9 9
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/compat.h>
12#include <linux/device.h> 11#include <linux/device.h>
13#include <linux/module.h> 12#include <linux/module.h>
14#include <linux/uaccess.h> 13#include <linux/uaccess.h>
15#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
16#include <linux/kernel_stat.h>
17 15
18#include <asm/compat.h> 16#include <asm/compat.h>
19#include <asm/cio.h> 17#include <asm/cio.h>
@@ -58,8 +56,6 @@ static void chsc_subchannel_irq(struct subchannel *sch)
58 56
59 CHSC_LOG(4, "irb"); 57 CHSC_LOG(4, "irb");
60 CHSC_LOG_HEX(4, irb, sizeof(*irb)); 58 CHSC_LOG_HEX(4, irb, sizeof(*irb));
61 inc_irq_stat(IRQIO_CSC);
62
63 /* Copy irb to provided request and set done. */ 59 /* Copy irb to provided request and set done. */
64 if (!request) { 60 if (!request) {
65 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n", 61 CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index c8faf6230b0..eb3140ee821 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/cio/cio.c
2 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
3 * 4 *
4 * Copyright IBM Corp. 1999, 2008 5 * Copyright IBM Corp. 1999,2008
5 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -600,6 +601,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
600 struct pt_regs *old_regs; 601 struct pt_regs *old_regs;
601 602
602 old_regs = set_irq_regs(regs); 603 old_regs = set_irq_regs(regs);
604 s390_idle_check(regs, S390_lowcore.int_clock,
605 S390_lowcore.async_enter_timer);
603 irq_enter(); 606 irq_enter();
604 __this_cpu_write(s390_idle.nohz_delay, 1); 607 __this_cpu_write(s390_idle.nohz_delay, 1);
605 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 608 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
@@ -611,7 +614,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
611 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; 614 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
612 irb = (struct irb *)&S390_lowcore.irb; 615 irb = (struct irb *)&S390_lowcore.irb;
613 do { 616 do {
614 kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL); 617 kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
615 if (tpi_info->adapter_IO) { 618 if (tpi_info->adapter_IO) {
616 do_adapter_IO(tpi_info->isc); 619 do_adapter_IO(tpi_info->isc);
617 continue; 620 continue;
@@ -619,7 +622,6 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
619 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 622 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
620 if (!sch) { 623 if (!sch) {
621 /* Clear pending interrupt condition. */ 624 /* Clear pending interrupt condition. */
622 inc_irq_stat(IRQIO_CIO);
623 tsch(tpi_info->schid, irb); 625 tsch(tpi_info->schid, irb);
624 continue; 626 continue;
625 } 627 }
@@ -632,10 +634,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
632 /* Call interrupt handler if there is one. */ 634 /* Call interrupt handler if there is one. */
633 if (sch->driver && sch->driver->irq) 635 if (sch->driver && sch->driver->irq)
634 sch->driver->irq(sch); 636 sch->driver->irq(sch);
635 else 637 }
636 inc_irq_stat(IRQIO_CIO);
637 } else
638 inc_irq_stat(IRQIO_CIO);
639 spin_unlock(sch->lock); 638 spin_unlock(sch->lock);
640 /* 639 /*
641 * Are more interrupts pending? 640 * Are more interrupts pending?
@@ -655,34 +654,44 @@ static struct io_subchannel_private console_priv;
655static int console_subchannel_in_use; 654static int console_subchannel_in_use;
656 655
657/* 656/*
658 * Use cio_tsch to update the subchannel status and call the interrupt handler 657 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
659 * if status had been pending. Called with the console_subchannel lock. 658 * Return non-zero if an interrupt was processed, zero otherwise.
660 */ 659 */
661static void cio_tsch(struct subchannel *sch) 660static int cio_tpi(void)
662{ 661{
662 struct tpi_info *tpi_info;
663 struct subchannel *sch;
663 struct irb *irb; 664 struct irb *irb;
664 int irq_context; 665 int irq_context;
665 666
667 tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
668 if (tpi(NULL) != 1)
669 return 0;
670 if (tpi_info->adapter_IO) {
671 do_adapter_IO(tpi_info->isc);
672 return 1;
673 }
666 irb = (struct irb *)&S390_lowcore.irb; 674 irb = (struct irb *)&S390_lowcore.irb;
667 /* Store interrupt response block to lowcore. */ 675 /* Store interrupt response block to lowcore. */
668 if (tsch(sch->schid, irb) != 0) 676 if (tsch(tpi_info->schid, irb) != 0)
669 /* Not status pending or not operational. */ 677 /* Not status pending or not operational. */
670 return; 678 return 1;
671 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); 679 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
672 /* Call interrupt handler with updated status. */ 680 if (!sch)
681 return 1;
673 irq_context = in_interrupt(); 682 irq_context = in_interrupt();
674 if (!irq_context) { 683 if (!irq_context)
675 local_bh_disable(); 684 local_bh_disable();
676 irq_enter(); 685 irq_enter();
677 } 686 spin_lock(sch->lock);
687 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
678 if (sch->driver && sch->driver->irq) 688 if (sch->driver && sch->driver->irq)
679 sch->driver->irq(sch); 689 sch->driver->irq(sch);
680 else 690 spin_unlock(sch->lock);
681 inc_irq_stat(IRQIO_CIO); 691 irq_exit();
682 if (!irq_context) { 692 if (!irq_context)
683 irq_exit();
684 _local_bh_enable(); 693 _local_bh_enable();
685 } 694 return 1;
686} 695}
687 696
688void *cio_get_console_priv(void) 697void *cio_get_console_priv(void)
@@ -694,16 +703,34 @@ void *cio_get_console_priv(void)
694 * busy wait for the next interrupt on the console 703 * busy wait for the next interrupt on the console
695 */ 704 */
696void wait_cons_dev(void) 705void wait_cons_dev(void)
706 __releases(console_subchannel.lock)
707 __acquires(console_subchannel.lock)
697{ 708{
709 unsigned long cr6 __attribute__ ((aligned (8)));
710 unsigned long save_cr6 __attribute__ ((aligned (8)));
711
712 /*
713 * before entering the spinlock we may already have
714 * processed the interrupt on a different CPU...
715 */
698 if (!console_subchannel_in_use) 716 if (!console_subchannel_in_use)
699 return; 717 return;
700 718
701 while (1) { 719 /* disable all but the console isc */
702 cio_tsch(&console_subchannel); 720 __ctl_store (save_cr6, 6, 6);
703 if (console_subchannel.schib.scsw.cmd.actl == 0) 721 cr6 = 1UL << (31 - CONSOLE_ISC);
704 break; 722 __ctl_load (cr6, 6, 6);
705 udelay_simple(100); 723
706 } 724 do {
725 spin_unlock(console_subchannel.lock);
726 if (!cio_tpi())
727 cpu_relax();
728 spin_lock(console_subchannel.lock);
729 } while (console_subchannel.schib.scsw.cmd.actl != 0);
730 /*
731 * restore previous isc value
732 */
733 __ctl_load (save_cr6, 6, 6);
707} 734}
708 735
709static int 736static int
@@ -1029,9 +1056,9 @@ extern void do_reipl_asm(__u32 schid);
1029/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 1056/* Make sure all subchannels are quiet before we re-ipl an lpar. */
1030void reipl_ccw_dev(struct ccw_dev_id *devid) 1057void reipl_ccw_dev(struct ccw_dev_id *devid)
1031{ 1058{
1032 struct subchannel_id uninitialized_var(schid); 1059 struct subchannel_id schid;
1033 1060
1034 s390_reset_system(NULL, NULL); 1061 s390_reset_system();
1035 if (reipl_find_schid(devid, &schid) != 0) 1062 if (reipl_find_schid(devid, &schid) != 0)
1036 panic("IPL Device not found\n"); 1063 panic("IPL Device not found\n");
1037 do_reipl_asm(*((__u32*)&schid)); 1064 do_reipl_asm(*((__u32*)&schid));
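The console wait loop in both versions of cio.c has the same shape: the subchannel lock is dropped around each poll so the pending interrupt can actually be consumed, then the activity-control state is rechecked under the lock. A sketch of that shape; poll_once() and still_active() are hypothetical stand-ins for cio_tpi()/cio_tsch() and the scsw.cmd.actl test.

#include <linux/spinlock.h>
#include <asm/processor.h>	/* cpu_relax() */

extern spinlock_t console_lock;	/* assumed */
extern int poll_once(void);
extern int still_active(void);

static void wait_for_console(void)
{
	do {
		spin_unlock(&console_lock);	/* let the interrupt in */
		if (!poll_once())
			cpu_relax();		/* nothing pending yet */
		spin_lock(&console_lock);
	} while (still_active());
}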
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4a1ff5c2eb8..155a82bcb9e 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,13 +68,8 @@ struct schib {
68 __u8 mda[4]; /* model dependent area */ 68 __u8 mda[4]; /* model dependent area */
69} __attribute__ ((packed,aligned(4))); 69} __attribute__ ((packed,aligned(4)));
70 70
71/*
72 * When rescheduled, todo's with higher values will overwrite those
73 * with lower values.
74 */
75enum sch_todo { 71enum sch_todo {
76 SCH_TODO_NOTHING, 72 SCH_TODO_NOTHING,
77 SCH_TODO_EVAL,
78 SCH_TODO_UNREG, 73 SCH_TODO_UNREG,
79}; 74};
80 75
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index c9fc61c0a86..2985eb43948 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/cio/cmf.c
3 *
2 * Linux on zSeries Channel Measurement Facility support 4 * Linux on zSeries Channel Measurement Facility support
3 * 5 *
4 * Copyright IBM Corp. 2000, 2006 6 * Copyright 2000,2006 IBM Corporation
5 * 7 *
6 * Authors: Arnd Bergmann <arndb@de.ibm.com> 8 * Authors: Arnd Bergmann <arndb@de.ibm.com>
7 * Cornelia Huck <cornelia.huck@de.ibm.com> 9 * Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -96,7 +98,7 @@ enum cmb_format {
96 * enum cmb_format. 98 * enum cmb_format.
97 */ 99 */
98static int format = CMF_AUTODETECT; 100static int format = CMF_AUTODETECT;
99module_param(format, bint, 0444); 101module_param(format, bool, 0444);
100 102
101/** 103/**
102 * struct cmb_operations - functions to use depending on cmb_format 104 * struct cmb_operations - functions to use depending on cmb_format
@@ -1339,7 +1341,7 @@ module_init(init_cmf);
1339MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); 1341MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
1340MODULE_LICENSE("GPL"); 1342MODULE_LICENSE("GPL");
1341MODULE_DESCRIPTION("channel measurement facility base driver\n" 1343MODULE_DESCRIPTION("channel measurement facility base driver\n"
1342 "Copyright IBM Corp. 2003\n"); 1344 "Copyright 2003 IBM Corporation\n");
1343 1345
1344EXPORT_SYMBOL_GPL(enable_cmf); 1346EXPORT_SYMBOL_GPL(enable_cmf);
1345EXPORT_SYMBOL_GPL(disable_cmf); 1347EXPORT_SYMBOL_GPL(disable_cmf);
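The module_param() change above hinges on a typing detail: the "bool" parameter type requires the backing variable to really be a bool, while "bint" parses boolean input but stores into an int, which is what the CMF "format" variable is (it can also hold CMF_AUTODETECT). Sketch of the bint form:

#include <linux/moduleparam.h>

static int format = -1;			/* autodetect, as in the driver */
module_param(format, bint, 0444);	/* boolean parsing, int storage */
MODULE_PARM_DESC(format, "0 = basic, 1 = extended, -1 = autodetect");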
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index 0f8a25f98b1..425f741a280 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Channel report handling code 2 * Channel report handling code
3 * 3 *
4 * Copyright IBM Corp. 2000, 2009 4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>, 5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>, 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>, 7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
@@ -13,7 +13,6 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/wait.h> 14#include <linux/wait.h>
15#include <asm/crw.h> 15#include <asm/crw.h>
16#include <asm/ctl_reg.h>
17 16
18static DEFINE_MUTEX(crw_handler_mutex); 17static DEFINE_MUTEX(crw_handler_mutex);
19static crw_handler_t crw_handlers[NR_RSCS]; 18static crw_handler_t crw_handlers[NR_RSCS];
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fd00afd8b85..92d7324acb1 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,6 +195,51 @@ void css_sch_device_unregister(struct subchannel *sch)
195} 195}
196EXPORT_SYMBOL_GPL(css_sch_device_unregister); 196EXPORT_SYMBOL_GPL(css_sch_device_unregister);
197 197
198static void css_sch_todo(struct work_struct *work)
199{
200 struct subchannel *sch;
201 enum sch_todo todo;
202
203 sch = container_of(work, struct subchannel, todo_work);
204 /* Find out todo. */
205 spin_lock_irq(sch->lock);
206 todo = sch->todo;
207 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
208 sch->schid.sch_no, todo);
209 sch->todo = SCH_TODO_NOTHING;
210 spin_unlock_irq(sch->lock);
211 /* Perform todo. */
212 if (todo == SCH_TODO_UNREG)
213 css_sch_device_unregister(sch);
214 /* Release workqueue ref. */
215 put_device(&sch->dev);
216}
217
218/**
219 * css_sched_sch_todo - schedule a subchannel operation
220 * @sch: subchannel
221 * @todo: todo
222 *
223 * Schedule the operation identified by @todo to be performed on the slow path
224 * workqueue. Do nothing if another operation with higher priority is already
225 * scheduled. Needs to be called with subchannel lock held.
226 */
227void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
228{
229 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
230 sch->schid.ssid, sch->schid.sch_no, todo);
231 if (sch->todo >= todo)
232 return;
233 /* Get workqueue ref. */
234 if (!get_device(&sch->dev))
235 return;
236 sch->todo = todo;
237 if (!queue_work(cio_work_q, &sch->todo_work)) {
238 /* Already queued, release workqueue ref. */
239 put_device(&sch->dev);
240 }
241}
242
198static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 243static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
199{ 244{
200 int i; 245 int i;
@@ -377,11 +422,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
377 /* Will be done on the slow path. */ 422 /* Will be done on the slow path. */
378 return -EAGAIN; 423 return -EAGAIN;
379 } 424 }
380 if (stsch_err(schid, &schib)) { 425 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
381 /* Subchannel is not provided. */
382 return -ENXIO;
383 }
384 if (!css_sch_is_valid(&schib)) {
385 /* Unusable - ignore. */ 426 /* Unusable - ignore. */
386 return 0; 427 return 0;
387 } 428 }
@@ -425,66 +466,6 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
425 css_schedule_eval(schid); 466 css_schedule_eval(schid);
426} 467}
427 468
428/**
429 * css_sched_sch_todo - schedule a subchannel operation
430 * @sch: subchannel
431 * @todo: todo
432 *
433 * Schedule the operation identified by @todo to be performed on the slow path
434 * workqueue. Do nothing if another operation with higher priority is already
435 * scheduled. Needs to be called with subchannel lock held.
436 */
437void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
438{
439 CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
440 sch->schid.ssid, sch->schid.sch_no, todo);
441 if (sch->todo >= todo)
442 return;
443 /* Get workqueue ref. */
444 if (!get_device(&sch->dev))
445 return;
446 sch->todo = todo;
447 if (!queue_work(cio_work_q, &sch->todo_work)) {
448 /* Already queued, release workqueue ref. */
449 put_device(&sch->dev);
450 }
451}
452EXPORT_SYMBOL_GPL(css_sched_sch_todo);
453
454static void css_sch_todo(struct work_struct *work)
455{
456 struct subchannel *sch;
457 enum sch_todo todo;
458 int ret;
459
460 sch = container_of(work, struct subchannel, todo_work);
461 /* Find out todo. */
462 spin_lock_irq(sch->lock);
463 todo = sch->todo;
464 CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
465 sch->schid.sch_no, todo);
466 sch->todo = SCH_TODO_NOTHING;
467 spin_unlock_irq(sch->lock);
468 /* Perform todo. */
469 switch (todo) {
470 case SCH_TODO_NOTHING:
471 break;
472 case SCH_TODO_EVAL:
473 ret = css_evaluate_known_subchannel(sch, 1);
474 if (ret == -EAGAIN) {
475 spin_lock_irq(sch->lock);
476 css_sched_sch_todo(sch, todo);
477 spin_unlock_irq(sch->lock);
478 }
479 break;
480 case SCH_TODO_UNREG:
481 css_sch_device_unregister(sch);
482 break;
483 }
484 /* Release workqueue ref. */
485 put_device(&sch->dev);
486}
487
488static struct idset *slow_subchannel_set; 469static struct idset *slow_subchannel_set;
489static spinlock_t slow_subchannel_lock; 470static spinlock_t slow_subchannel_lock;
490static wait_queue_head_t css_eval_wq; 471static wait_queue_head_t css_eval_wq;
@@ -540,7 +521,6 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
540 case -ENOMEM: 521 case -ENOMEM:
541 case -EIO: 522 case -EIO:
542 /* These should abort looping */ 523 /* These should abort looping */
543 idset_sch_del_subseq(slow_subchannel_set, schid);
544 break; 524 break;
545 default: 525 default:
546 rc = 0; 526 rc = 0;
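The css.c hunk moves css_sch_todo()/css_sched_sch_todo() and, with them, the reference discipline for queued work: a device reference is taken before queue_work(), dropped at once if the work item was already queued, and otherwise dropped by the work function after it runs, so exactly one reference is pinned per pending item. A standalone C model of that invariant (single-threaded, invented names; not the kernel API):

    /* Userspace model of the "workqueue ref" pattern above. */
    #include <stdbool.h>
    #include <stdio.h>

    struct sch_model {
        int refs;     /* stands in for the struct device refcount */
        bool queued;  /* stands in for the pending work item */
        int todo;     /* highest-priority pending operation */
    };

    static bool get_ref(struct sch_model *s) { s->refs++; return true; }
    static void put_ref(struct sch_model *s) { s->refs--; }

    /* Mirrors css_sched_sch_todo(): take a ref, then queue; drop the ref
     * again if the work was already queued, since only one reference may
     * be in flight per pending work item. */
    static void sched_todo(struct sch_model *s, int todo)
    {
        if (s->todo >= todo)
            return;        /* higher-priority op already pending */
        if (!get_ref(s))
            return;
        s->todo = todo;
        if (s->queued) {
            put_ref(s);    /* already queued: release the extra ref */
            return;
        }
        s->queued = true;
    }

    /* Mirrors css_sch_todo(): consume the todo, then release the ref. */
    static void run_todo(struct sch_model *s)
    {
        int todo = s->todo;

        s->todo = 0;
        s->queued = false;
        printf("performing todo %d\n", todo);
        put_ref(s);
    }

    int main(void)
    {
        struct sch_model s = { .refs = 1 };

        sched_todo(&s, 1);
        sched_todo(&s, 2);   /* upgrades the todo, ref already held */
        run_todo(&s);
        printf("refs=%d (back to baseline)\n", s.refs);
        return 0;
    }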
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 4af3dfe70ef..80ebdddf774 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -112,6 +112,9 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
112extern void css_reiterate_subchannels(void); 112extern void css_reiterate_subchannels(void);
113void css_update_ssd_info(struct subchannel *sch); 113void css_update_ssd_info(struct subchannel *sch);
114 114
115#define __MAX_SUBCHANNEL 65535
116#define __MAX_SSID 3
117
115struct channel_subsystem { 118struct channel_subsystem {
116 u8 cssid; 119 u8 cssid;
117 int valid; 120 int valid;
@@ -130,8 +133,6 @@ struct channel_subsystem {
130 133
131extern struct channel_subsystem *channel_subsystems[]; 134extern struct channel_subsystem *channel_subsystems[];
132 135
133void channel_subsystem_reinit(void);
134
135/* Helper functions to build lists for the slow path. */ 136/* Helper functions to build lists for the slow path. */
136void css_schedule_eval(struct subchannel_id schid); 137void css_schedule_eval(struct subchannel_id schid);
137void css_schedule_eval_all(void); 138void css_schedule_eval_all(void);
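The reintroduced __MAX_SUBCHANNEL/__MAX_SSID constants bound the subchannel-id space at four subchannel sets of 65536 subchannels each. A standalone sketch of walking that space with a visitor callback, in the spirit of for_each_subchannel() (simplified schid layout, invented names):

    #include <stdint.h>
    #include <stdio.h>

    #define __MAX_SUBCHANNEL 65535
    #define __MAX_SSID 3

    struct schid { uint8_t ssid; uint16_t sch_no; };

    static int visit(struct schid id, void *data)
    {
        unsigned long *count = data;

        (*count)++;
        return 0;    /* nonzero would abort the walk */
    }

    int main(void)
    {
        unsigned long count = 0;
        struct schid id;

        for (id.ssid = 0; id.ssid <= __MAX_SSID; id.ssid++)
            for (id.sch_no = 0; ; id.sch_no++) {
                if (visit(id, &count))
                    return 1;
                if (id.sch_no == __MAX_SUBCHANNEL)
                    break;    /* avoid 16-bit wraparound */
            }
        printf("%lu ids visited\n", count);    /* 4 * 65536 */
        return 0;
    }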
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 7cd5c6812ac..8e04c00cf0a 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/cio/device.c
2 * bus driver for ccw devices 3 * bus driver for ccw devices
3 * 4 *
4 * Copyright IBM Corp. 2002, 2008 5 * Copyright IBM Corp. 2002,2008
5 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -20,7 +21,6 @@
20#include <linux/device.h> 21#include <linux/device.h>
21#include <linux/workqueue.h> 22#include <linux/workqueue.h>
22#include <linux/timer.h> 23#include <linux/timer.h>
23#include <linux/kernel_stat.h>
24 24
25#include <asm/ccwdev.h> 25#include <asm/ccwdev.h>
26#include <asm/cio.h> 26#include <asm/cio.h>
@@ -694,17 +694,7 @@ static int match_dev_id(struct device *dev, void *data)
694 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 694 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
695} 695}
696 696
697/** 697static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
698 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
699 * @dev_id: id of the device to be searched
700 *
701 * This function searches all devices attached to the ccw bus for a device
702 * matching @dev_id.
703 * Returns:
704 * If a device is found its reference count is increased and returned;
705 * else %NULL is returned.
706 */
707struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
708{ 698{
709 struct device *dev; 699 struct device *dev;
710 700
@@ -712,7 +702,6 @@ struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
712 702
713 return dev ? to_ccwdev(dev) : NULL; 703 return dev ? to_ccwdev(dev) : NULL;
714} 704}
715EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
716 705
717static void ccw_device_do_unbind_bind(struct ccw_device *cdev) 706static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
718{ 707{
@@ -758,7 +747,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
758 struct ccw_device *cdev) 747 struct ccw_device *cdev)
759{ 748{
760 cdev->private->cdev = cdev; 749 cdev->private->cdev = cdev;
761 cdev->private->int_class = IRQIO_CIO;
762 atomic_set(&cdev->private->onoff, 0); 750 atomic_set(&cdev->private->onoff, 0);
763 cdev->dev.parent = &sch->dev; 751 cdev->dev.parent = &sch->dev;
764 cdev->dev.release = ccw_device_release; 752 cdev->dev.release = ccw_device_release;
@@ -1022,8 +1010,6 @@ static void io_subchannel_irq(struct subchannel *sch)
1022 CIO_TRACE_EVENT(6, dev_name(&sch->dev)); 1010 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1023 if (cdev) 1011 if (cdev)
1024 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1012 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1025 else
1026 inc_irq_stat(IRQIO_CIO);
1027} 1013}
1028 1014
1029void io_subchannel_init_config(struct subchannel *sch) 1015void io_subchannel_init_config(struct subchannel *sch)
@@ -1424,10 +1410,8 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
1424 } 1410 }
1425 if (device_is_disconnected(cdev)) 1411 if (device_is_disconnected(cdev))
1426 return IO_SCH_REPROBE; 1412 return IO_SCH_REPROBE;
1427 if (cdev->online && !cdev->private->flags.resuming) 1413 if (cdev->online)
1428 return IO_SCH_VERIFY; 1414 return IO_SCH_VERIFY;
1429 if (cdev->private->state == DEV_STATE_NOT_OPER)
1430 return IO_SCH_UNREG_ATTACH;
1431 return IO_SCH_NOP; 1415 return IO_SCH_NOP;
1432} 1416}
1433 1417
@@ -1469,6 +1453,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1469 rc = 0; 1453 rc = 0;
1470 goto out_unlock; 1454 goto out_unlock;
1471 case IO_SCH_VERIFY: 1455 case IO_SCH_VERIFY:
1456 if (cdev->private->flags.resuming == 1) {
1457 if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
1458 ccw_device_set_notoper(cdev);
1459 break;
1460 }
1461 }
1472 /* Trigger path verification. */ 1462 /* Trigger path verification. */
1473 io_subchannel_verify(sch); 1463 io_subchannel_verify(sch);
1474 rc = 0; 1464 rc = 0;
@@ -1515,14 +1505,11 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1515 goto out; 1505 goto out;
1516 break; 1506 break;
1517 case IO_SCH_UNREG_ATTACH: 1507 case IO_SCH_UNREG_ATTACH:
1518 spin_lock_irqsave(sch->lock, flags);
1519 if (cdev->private->flags.resuming) { 1508 if (cdev->private->flags.resuming) {
1520 /* Device will be handled later. */ 1509 /* Device will be handled later. */
1521 rc = 0; 1510 rc = 0;
1522 goto out_unlock; 1511 goto out;
1523 } 1512 }
1524 sch_set_cdev(sch, NULL);
1525 spin_unlock_irqrestore(sch->lock, flags);
1526 /* Unregister ccw device. */ 1513 /* Unregister ccw device. */
1527 ccw_device_unregister(cdev); 1514 ccw_device_unregister(cdev);
1528 break; 1515 break;
@@ -1634,7 +1621,6 @@ ccw_device_probe_console(void)
1634 memset(&console_private, 0, sizeof(struct ccw_device_private)); 1621 memset(&console_private, 0, sizeof(struct ccw_device_private));
1635 console_cdev.private = &console_private; 1622 console_cdev.private = &console_private;
1636 console_private.cdev = &console_cdev; 1623 console_private.cdev = &console_cdev;
1637 console_private.int_class = IRQIO_CIO;
1638 ret = ccw_device_console_enable(&console_cdev, sch); 1624 ret = ccw_device_console_enable(&console_cdev, sch);
1639 if (ret) { 1625 if (ret) {
1640 cio_release_console(); 1626 cio_release_console();
@@ -1685,9 +1671,15 @@ struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
1685 const char *bus_id) 1671 const char *bus_id)
1686{ 1672{
1687 struct device *dev; 1673 struct device *dev;
1674 struct device_driver *drv;
1675
1676 drv = get_driver(&cdrv->driver);
1677 if (!drv)
1678 return NULL;
1688 1679
1689 dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id, 1680 dev = driver_find_device(drv, NULL, (void *)bus_id,
1690 __ccwdev_check_busid); 1681 __ccwdev_check_busid);
1682 put_driver(drv);
1691 1683
1692 return dev ? to_ccwdev(dev) : NULL; 1684 return dev ? to_ccwdev(dev) : NULL;
1693} 1685}
@@ -1710,18 +1702,11 @@ ccw_device_probe (struct device *dev)
1710 int ret; 1702 int ret;
1711 1703
1712 cdev->drv = cdrv; /* to let the driver call _set_online */ 1704 cdev->drv = cdrv; /* to let the driver call _set_online */
1713 /* Note: we interpret class 0 in this context as an uninitialized
1714 * field since it translates to a non-I/O interrupt class. */
1715 if (cdrv->int_class != 0)
1716 cdev->private->int_class = cdrv->int_class;
1717 else
1718 cdev->private->int_class = IRQIO_CIO;
1719 1705
1720 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; 1706 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1721 1707
1722 if (ret) { 1708 if (ret) {
1723 cdev->drv = NULL; 1709 cdev->drv = NULL;
1724 cdev->private->int_class = IRQIO_CIO;
1725 return ret; 1710 return ret;
1726 } 1711 }
1727 1712
@@ -1755,7 +1740,6 @@ ccw_device_remove (struct device *dev)
1755 } 1740 }
1756 ccw_device_set_timeout(cdev, 0); 1741 ccw_device_set_timeout(cdev, 0);
1757 cdev->drv = NULL; 1742 cdev->drv = NULL;
1758 cdev->private->int_class = IRQIO_CIO;
1759 return 0; 1743 return 0;
1760} 1744}
1761 1745
@@ -1871,9 +1855,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1871 */ 1855 */
1872 cdev->private->flags.resuming = 1; 1856 cdev->private->flags.resuming = 1;
1873 cdev->private->path_new_mask = LPM_ANYPATH; 1857 cdev->private->path_new_mask = LPM_ANYPATH;
1874 css_sched_sch_todo(sch, SCH_TODO_EVAL); 1858 css_schedule_eval(sch->schid);
1875 spin_unlock_irq(sch->lock); 1859 spin_unlock_irq(sch->lock);
1876 css_wait_for_slow_path(); 1860 css_complete_work();
1877 1861
1878 /* cdev may have been moved to a different subchannel. */ 1862 /* cdev may have been moved to a different subchannel. */
1879 sch = to_subchannel(cdev->dev.parent); 1863 sch = to_subchannel(cdev->dev.parent);
@@ -2036,6 +2020,16 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
2036 driver_unregister(&cdriver->driver); 2020 driver_unregister(&cdriver->driver);
2037} 2021}
2038 2022
2023/* Helper func for qdio. */
2024struct subchannel_id
2025ccw_device_get_subchannel_id(struct ccw_device *cdev)
2026{
2027 struct subchannel *sch;
2028
2029 sch = to_subchannel(cdev->dev.parent);
2030 return sch->schid;
2031}
2032
2039static void ccw_device_todo(struct work_struct *work) 2033static void ccw_device_todo(struct work_struct *work)
2040{ 2034{
2041 struct ccw_device_private *priv; 2035 struct ccw_device_private *priv;
@@ -2128,3 +2122,4 @@ EXPORT_SYMBOL(ccw_device_set_offline);
2128EXPORT_SYMBOL(ccw_driver_register); 2122EXPORT_SYMBOL(ccw_driver_register);
2129EXPORT_SYMBOL(ccw_driver_unregister); 2123EXPORT_SYMBOL(ccw_driver_unregister);
2130EXPORT_SYMBOL(get_ccwdev_by_busid); 2124EXPORT_SYMBOL(get_ccwdev_by_busid);
2125EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
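The get_ccwdev_by_busid() hunk reverts to bracketing the lookup with get_driver()/put_driver(), so the driver cannot go away while driver_find_device() walks its devices. A userspace model of that bracketing (refcount as a plain int, all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    struct drv { const char *name; int refs; };

    static struct drv *get_driver(struct drv *d) { d->refs++; return d; }
    static void put_driver(struct drv *d) { d->refs--; }

    /* Stand-in for driver_find_device(): pretend one device exists. */
    static const char *find_device(struct drv *d, const char *bus_id)
    {
        static const char *known = "0.0.4711";

        return strcmp(bus_id, known) == 0 ? known : NULL;
    }

    static const char *get_dev_by_busid(struct drv *cdrv, const char *bus_id)
    {
        struct drv *d = get_driver(cdrv);
        const char *dev;

        if (!d)
            return NULL;
        dev = find_device(d, bus_id);
        put_driver(d);    /* lookup done, drop the driver reference */
        return dev;
    }

    int main(void)
    {
        struct drv cdrv = { .name = "dasd" };
        const char *dev = get_dev_by_busid(&cdrv, "0.0.4711");

        printf("found: %s (drv refs=%d)\n", dev ? dev : "none", cdrv.refs);
        return 0;
    }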
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 7d4ecb65db0..0b7245c72d5 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -5,7 +5,6 @@
5#include <linux/atomic.h> 5#include <linux/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8#include <linux/kernel_stat.h>
9#include "io_sch.h" 8#include "io_sch.h"
10 9
11/* 10/*
@@ -57,16 +56,7 @@ extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];
57static inline void 56static inline void
58dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event) 57dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
59{ 58{
60 int state = cdev->private->state; 59 dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
61
62 if (dev_event == DEV_EVENT_INTERRUPT) {
63 if (state == DEV_STATE_ONLINE)
64 inc_irq_stat(cdev->private->int_class);
65 else if (state != DEV_STATE_CMFCHANGE &&
66 state != DEV_STATE_CMFUPDATE)
67 inc_irq_stat(IRQIO_CIO);
68 }
69 dev_jumptable[state][dev_event](cdev, dev_event);
70} 60}
71 61
72/* 62/*
@@ -100,7 +90,6 @@ int ccw_device_test_sense_data(struct ccw_device *);
100void ccw_device_schedule_sch_unregister(struct ccw_device *); 90void ccw_device_schedule_sch_unregister(struct ccw_device *);
101int ccw_purge_blacklisted(void); 91int ccw_purge_blacklisted(void);
102void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo); 92void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
103struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);
104 93
105/* Function prototypes for device status and basic sense stuff. */ 94/* Function prototypes for device status and basic sense stuff. */
106void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 95void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -141,7 +130,9 @@ int ccw_device_notify(struct ccw_device *, int);
141void ccw_device_set_disconnected(struct ccw_device *cdev); 130void ccw_device_set_disconnected(struct ccw_device *cdev);
142void ccw_device_set_notoper(struct ccw_device *cdev); 131void ccw_device_set_notoper(struct ccw_device *cdev);
143 132
133/* qdio needs this. */
144void ccw_device_set_timeout(struct ccw_device *, int); 134void ccw_device_set_timeout(struct ccw_device *, int);
135extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
145 136
146/* Channel measurement facility related */ 137/* Channel measurement facility related */
147void retry_set_schib(struct ccw_device *cdev); 138void retry_set_schib(struct ccw_device *cdev);
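With the interrupt accounting removed, dev_fsm_event() reduces to a pure jump-table dispatch: one function pointer per (state, event) pair. A minimal self-contained sketch of that dispatch style (states, events, and actions invented):

    #include <stdio.h>

    enum state { ST_OFFLINE, ST_ONLINE, NR_STATES };
    enum event { EV_INTERRUPT, EV_TIMEOUT, NR_EVENTS };

    typedef void fsm_func_t(int *state, enum event ev);

    static void ignore(int *state, enum event ev) { }
    static void go_online(int *state, enum event ev) { *state = ST_ONLINE; }
    static void go_offline(int *state, enum event ev) { *state = ST_OFFLINE; }

    static fsm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
        [ST_OFFLINE] = { go_online, ignore },
        [ST_ONLINE]  = { ignore, go_offline },
    };

    /* One indirect call, no per-event statistics. */
    static void fsm_event(int *state, enum event ev)
    {
        jumptable[*state][ev](state, ev);
    }

    int main(void)
    {
        int state = ST_OFFLINE;

        fsm_event(&state, EV_INTERRUPT);
        printf("state=%d\n", state);    /* 1: ST_ONLINE */
        return 0;
    }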
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1bb1d00095a..52c233fa2b1 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * drivers/s390/cio/device_fsm.c
2 * finite state machine for device handling 3 * finite state machine for device handling
3 * 4 *
4 * Copyright IBM Corp. 2002, 2008 5 * Copyright IBM Corp. 2002,2008
5 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 */ 8 */
@@ -495,26 +496,8 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
495 cdev->private->pgid_reset_mask = 0; 496 cdev->private->pgid_reset_mask = 0;
496} 497}
497 498
498static void create_fake_irb(struct irb *irb, int type) 499void
499{ 500ccw_device_verify_done(struct ccw_device *cdev, int err)
500 memset(irb, 0, sizeof(*irb));
501 if (type == FAKE_CMD_IRB) {
502 struct cmd_scsw *scsw = &irb->scsw.cmd;
503 scsw->cc = 1;
504 scsw->fctl = SCSW_FCTL_START_FUNC;
505 scsw->actl = SCSW_ACTL_START_PEND;
506 scsw->stctl = SCSW_STCTL_STATUS_PEND;
507 } else if (type == FAKE_TM_IRB) {
508 struct tm_scsw *scsw = &irb->scsw.tm;
509 scsw->x = 1;
510 scsw->cc = 1;
511 scsw->fctl = SCSW_FCTL_START_FUNC;
512 scsw->actl = SCSW_ACTL_START_PEND;
513 scsw->stctl = SCSW_STCTL_STATUS_PEND;
514 }
515}
516
517void ccw_device_verify_done(struct ccw_device *cdev, int err)
518{ 501{
519 struct subchannel *sch; 502 struct subchannel *sch;
520 503
@@ -537,8 +520,12 @@ callback:
537 ccw_device_done(cdev, DEV_STATE_ONLINE); 520 ccw_device_done(cdev, DEV_STATE_ONLINE);
538 /* Deliver fake irb to device driver, if needed. */ 521 /* Deliver fake irb to device driver, if needed. */
539 if (cdev->private->flags.fake_irb) { 522 if (cdev->private->flags.fake_irb) {
540 create_fake_irb(&cdev->private->irb, 523 memset(&cdev->private->irb, 0, sizeof(struct irb));
541 cdev->private->flags.fake_irb); 524 cdev->private->irb.scsw.cmd.cc = 1;
525 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
526 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
527 cdev->private->irb.scsw.cmd.stctl =
528 SCSW_STCTL_STATUS_PEND;
542 cdev->private->flags.fake_irb = 0; 529 cdev->private->flags.fake_irb = 0;
543 if (cdev->handler) 530 if (cdev->handler)
544 cdev->handler(cdev, cdev->private->intparm, 531 cdev->handler(cdev, cdev->private->intparm,
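The device_fsm.c hunk open-codes what create_fake_irb() did for the command-mode case: clear the IRB, then mark a start function as pending with deferred condition code 1, so the driver sees a plausible interrupt for the I/O it issued during path verification. A standalone sketch (the SCSW constants match the hunk, but the struct layout is simplified; the real cmd_scsw packs these into bitfields):

    #include <string.h>
    #include <stdio.h>

    #define SCSW_FCTL_START_FUNC    0x40
    #define SCSW_ACTL_START_PEND    0x20
    #define SCSW_STCTL_STATUS_PEND  0x01

    struct cmd_scsw { unsigned int cc, fctl, actl, stctl; };
    struct irb { struct cmd_scsw cmd; };

    static void make_fake_cmd_irb(struct irb *irb)
    {
        memset(irb, 0, sizeof(*irb));
        irb->cmd.cc = 1;                         /* deferred cc 1 */
        irb->cmd.fctl = SCSW_FCTL_START_FUNC;    /* start function... */
        irb->cmd.actl = SCSW_ACTL_START_PEND;    /* ...still pending */
        irb->cmd.stctl = SCSW_STCTL_STATUS_PEND;
    }

    int main(void)
    {
        struct irb irb;

        make_fake_cmd_irb(&irb);
        printf("cc=%u fctl=%#x actl=%#x stctl=%#x\n",
               irb.cmd.cc, irb.cmd.fctl, irb.cmd.actl, irb.cmd.stctl);
        return 0;
    }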
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index d4fa30541a3..78a0b43862c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * CCW device SENSE ID I/O handling. 2 * CCW device SENSE ID I/O handling.
3 * 3 *
4 * Copyright IBM Corp. 2002, 2009 4 * Copyright IBM Corp. 2002,2009
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c77b6e06bf6..f98698d5735 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
198 if (cdev->private->state == DEV_STATE_VERIFY) { 198 if (cdev->private->state == DEV_STATE_VERIFY) {
199 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
200 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
201 cdev->private->flags.fake_irb = FAKE_CMD_IRB; 201 cdev->private->flags.fake_irb = 1;
202 cdev->private->intparm = intparm; 202 cdev->private->intparm = intparm;
203 return 0; 203 return 0;
204 } else 204 } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
213 ret = cio_set_options (sch, flags); 213 ret = cio_set_options (sch, flags);
214 if (ret) 214 if (ret)
215 return ret; 215 return ret;
216 /* Adjust requested path mask to exclude unusable paths. */ 216 /* Adjust requested path mask to exclude varied-off paths. */
217 if (lpm) { 217 if (lpm) {
218 lpm &= sch->lpm; 218 lpm &= sch->opm;
219 if (lpm == 0) 219 if (lpm == 0)
220 return -EACCES; 220 return -EACCES;
221 } 221 }
@@ -605,21 +605,11 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
605 sch = to_subchannel(cdev->dev.parent); 605 sch = to_subchannel(cdev->dev.parent);
606 if (!sch->schib.pmcw.ena) 606 if (!sch->schib.pmcw.ena)
607 return -EINVAL; 607 return -EINVAL;
608 if (cdev->private->state == DEV_STATE_VERIFY) {
609 /* Remember to fake irb when finished. */
610 if (!cdev->private->flags.fake_irb) {
611 cdev->private->flags.fake_irb = FAKE_TM_IRB;
612 cdev->private->intparm = intparm;
613 return 0;
614 } else
615 /* There's already a fake I/O around. */
616 return -EBUSY;
617 }
618 if (cdev->private->state != DEV_STATE_ONLINE) 608 if (cdev->private->state != DEV_STATE_ONLINE)
619 return -EIO; 609 return -EIO;
620 /* Adjust requested path mask to exclude unusable paths. */ 610 /* Adjust requested path mask to exclude varied-off paths. */
621 if (lpm) { 611 if (lpm) {
622 lpm &= sch->lpm; 612 lpm &= sch->opm;
623 if (lpm == 0) 613 if (lpm == 0)
624 return -EACCES; 614 return -EACCES;
625 } 615 }
@@ -755,18 +745,14 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
755} 745}
756EXPORT_SYMBOL(ccw_device_tm_intrg); 746EXPORT_SYMBOL(ccw_device_tm_intrg);
757 747
758/** 748// FIXME: these have to go:
759 * ccw_device_get_schid - obtain a subchannel id
760 * @cdev: device to obtain the id for
761 * @schid: where to fill in the values
762 */
763void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
764{
765 struct subchannel *sch = to_subchannel(cdev->dev.parent);
766 749
767 *schid = sch->schid; 750int
751_ccw_device_get_subchannel_number(struct ccw_device *cdev)
752{
753 return cdev->private->schid.sch_no;
768} 754}
769EXPORT_SYMBOL_GPL(ccw_device_get_schid); 755
770 756
771MODULE_LICENSE("GPL"); 757MODULE_LICENSE("GPL");
772EXPORT_SYMBOL(ccw_device_set_options_mask); 758EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -781,4 +767,5 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
781EXPORT_SYMBOL(ccw_device_start_key); 767EXPORT_SYMBOL(ccw_device_start_key);
782EXPORT_SYMBOL(ccw_device_get_ciw); 768EXPORT_SYMBOL(ccw_device_get_ciw);
783EXPORT_SYMBOL(ccw_device_get_path_mask); 769EXPORT_SYMBOL(ccw_device_get_path_mask);
770EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
784EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc); 771EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
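Both device_ops.c hunks change which mask the requested logical path mask is intersected with (sch->lpm versus sch->opm), but the shape of the check stays the same: intersect, and fail with -EACCES when no path remains. A standalone sketch, assuming 8-bit path masks as in the channel subsystem:

    #include <stdio.h>
    #include <errno.h>

    static int adjust_lpm(unsigned char lpm, unsigned char opm,
                          unsigned char *out)
    {
        if (lpm) {
            lpm &= opm;            /* drop paths not in the usable mask */
            if (lpm == 0)
                return -EACCES;    /* no path left to start I/O on */
        }
        *out = lpm;
        return 0;
    }

    int main(void)
    {
        unsigned char lpm = 0;
        /* request paths 0-1, only path 0 operational */
        int rc = adjust_lpm(0xc0, 0x80, &lpm);

        printf("rc=%d lpm=%#x\n", rc, lpm);    /* rc=0 lpm=0x80 */
        return 0;
    }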
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 908d287f66c..07a4fd29f09 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * CCW device PGID and path verification I/O handling. 2 * CCW device PGID and path verification I/O handling.
3 * 3 *
4 * Copyright IBM Corp. 2002, 2009 4 * Copyright IBM Corp. 2002,2009
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
234 * Determine pathgroup state from PGID data. 234 * Determine pathgroup state from PGID data.
235 */ 235 */
236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
237 int *mismatch, u8 *reserved, u8 *reset) 237 int *mismatch, int *reserved, u8 *reset)
238{ 238{
239 struct pgid *pgid = &cdev->private->pgid[0]; 239 struct pgid *pgid = &cdev->private->pgid[0];
240 struct pgid *first = NULL; 240 struct pgid *first = NULL;
@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
248 if ((cdev->private->pgid_valid_mask & lpm) == 0) 248 if ((cdev->private->pgid_valid_mask & lpm) == 0)
249 continue; 249 continue;
250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) 250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
251 *reserved |= lpm; 251 *reserved = 1;
252 if (pgid_is_reset(pgid)) { 252 if (pgid_is_reset(pgid)) {
253 *reset |= lpm; 253 *reset |= lpm;
254 continue; 254 continue;
@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
316 struct subchannel *sch = to_subchannel(cdev->dev.parent); 316 struct subchannel *sch = to_subchannel(cdev->dev.parent);
317 struct pgid *pgid; 317 struct pgid *pgid;
318 int mismatch = 0; 318 int mismatch = 0;
319 u8 reserved = 0; 319 int reserved = 0;
320 u8 reset = 0; 320 u8 reset = 0;
321 u8 donepm; 321 u8 donepm;
322 322
323 if (rc) 323 if (rc)
324 goto out; 324 goto out;
325 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); 325 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
326 if (reserved == cdev->private->pgid_valid_mask) 326 if (reserved)
327 rc = -EUSERS; 327 rc = -EUSERS;
328 else if (mismatch) 328 else if (mismatch)
329 rc = -EOPNOTSUPP; 329 rc = -EOPNOTSUPP;
@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
336 } 336 }
337out: 337out:
338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
339 "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid, 339 "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
341 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
342 switch (rc) { 342 switch (rc) {
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 15b56a15db1..66d8066ef22 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,5 +1,8 @@
1/* 1/*
2 * Copyright IBM Corp. 2002 2 * drivers/s390/cio/device_status.c
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
5 * IBM Corporation
3 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
4 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
5 * 8 *
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
deleted file mode 100644
index d9eddcba7e8..00000000000
--- a/drivers/s390/cio/eadm_sch.c
+++ /dev/null
@@ -1,401 +0,0 @@
1/*
2 * Driver for s390 eadm subchannels
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/kernel_stat.h>
9#include <linux/workqueue.h>
10#include <linux/spinlock.h>
11#include <linux/device.h>
12#include <linux/module.h>
13#include <linux/timer.h>
14#include <linux/slab.h>
15#include <linux/list.h>
16
17#include <asm/css_chars.h>
18#include <asm/debug.h>
19#include <asm/isc.h>
20#include <asm/cio.h>
21#include <asm/scsw.h>
22#include <asm/eadm.h>
23
24#include "eadm_sch.h"
25#include "ioasm.h"
26#include "cio.h"
27#include "css.h"
28#include "orb.h"
29
30MODULE_DESCRIPTION("driver for s390 eadm subchannels");
31MODULE_LICENSE("GPL");
32
33#define EADM_TIMEOUT (5 * HZ)
34static DEFINE_SPINLOCK(list_lock);
35static LIST_HEAD(eadm_list);
36
37static debug_info_t *eadm_debug;
38
39#define EADM_LOG(imp, txt) do { \
40 debug_text_event(eadm_debug, imp, txt); \
41 } while (0)
42
43static void EADM_LOG_HEX(int level, void *data, int length)
44{
45 if (level > eadm_debug->level)
46 return;
47 while (length > 0) {
48 debug_event(eadm_debug, level, data, length);
49 length -= eadm_debug->buf_size;
50 data += eadm_debug->buf_size;
51 }
52}
53
54static void orb_init(union orb *orb)
55{
56 memset(orb, 0, sizeof(union orb));
57 orb->eadm.compat1 = 1;
58 orb->eadm.compat2 = 1;
59 orb->eadm.fmt = 1;
60 orb->eadm.x = 1;
61}
62
63static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
64{
65 union orb *orb = &get_eadm_private(sch)->orb;
66 int cc;
67
68 orb_init(orb);
69 orb->eadm.aob = (u32)__pa(aob);
70 orb->eadm.intparm = (u32)(addr_t)sch;
71 orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
72
73 EADM_LOG(6, "start");
74 EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
75
76 cc = ssch(sch->schid, orb);
77 switch (cc) {
78 case 0:
79 sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
80 break;
81 case 1: /* status pending */
82 case 2: /* busy */
83 return -EBUSY;
84 case 3: /* not operational */
85 return -ENODEV;
86 }
87 return 0;
88}
89
90static int eadm_subchannel_clear(struct subchannel *sch)
91{
92 int cc;
93
94 cc = csch(sch->schid);
95 if (cc)
96 return -ENODEV;
97
98 sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
99 return 0;
100}
101
102static void eadm_subchannel_timeout(unsigned long data)
103{
104 struct subchannel *sch = (struct subchannel *) data;
105
106 spin_lock_irq(sch->lock);
107 EADM_LOG(1, "timeout");
108 EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
109 if (eadm_subchannel_clear(sch))
110 EADM_LOG(0, "clear failed");
111 spin_unlock_irq(sch->lock);
112}
113
114static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
115{
116 struct eadm_private *private = get_eadm_private(sch);
117
118 if (expires == 0) {
119 del_timer(&private->timer);
120 return;
121 }
122 if (timer_pending(&private->timer)) {
123 if (mod_timer(&private->timer, jiffies + expires))
124 return;
125 }
126 private->timer.function = eadm_subchannel_timeout;
127 private->timer.data = (unsigned long) sch;
128 private->timer.expires = jiffies + expires;
129 add_timer(&private->timer);
130}
131
132static void eadm_subchannel_irq(struct subchannel *sch)
133{
134 struct eadm_private *private = get_eadm_private(sch);
135 struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
136 struct irb *irb = (struct irb *)&S390_lowcore.irb;
137 int error = 0;
138
139 EADM_LOG(6, "irq");
140 EADM_LOG_HEX(6, irb, sizeof(*irb));
141
142 inc_irq_stat(IRQIO_ADM);
143
144 if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
145 && scsw->eswf == 1 && irb->esw.eadm.erw.r)
146 error = -EIO;
147
148 if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
149 error = -ETIMEDOUT;
150
151 eadm_subchannel_set_timeout(sch, 0);
152
153 if (private->state != EADM_BUSY) {
154 EADM_LOG(1, "irq unsol");
155 EADM_LOG_HEX(1, irb, sizeof(*irb));
156 private->state = EADM_NOT_OPER;
157 css_sched_sch_todo(sch, SCH_TODO_EVAL);
158 return;
159 }
160 scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
161 private->state = EADM_IDLE;
162}
163
164static struct subchannel *eadm_get_idle_sch(void)
165{
166 struct eadm_private *private;
167 struct subchannel *sch;
168 unsigned long flags;
169
170 spin_lock_irqsave(&list_lock, flags);
171 list_for_each_entry(private, &eadm_list, head) {
172 sch = private->sch;
173 spin_lock(sch->lock);
174 if (private->state == EADM_IDLE) {
175 private->state = EADM_BUSY;
176 list_move_tail(&private->head, &eadm_list);
177 spin_unlock(sch->lock);
178 spin_unlock_irqrestore(&list_lock, flags);
179
180 return sch;
181 }
182 spin_unlock(sch->lock);
183 }
184 spin_unlock_irqrestore(&list_lock, flags);
185
186 return NULL;
187}
188
189static int eadm_start_aob(struct aob *aob)
190{
191 struct eadm_private *private;
192 struct subchannel *sch;
193 unsigned long flags;
194 int ret;
195
196 sch = eadm_get_idle_sch();
197 if (!sch)
198 return -EBUSY;
199
200 spin_lock_irqsave(sch->lock, flags);
201 eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
202 ret = eadm_subchannel_start(sch, aob);
203 if (!ret)
204 goto out_unlock;
205
206 /* Handle start subchannel failure. */
207 eadm_subchannel_set_timeout(sch, 0);
208 private = get_eadm_private(sch);
209 private->state = EADM_NOT_OPER;
210 css_sched_sch_todo(sch, SCH_TODO_EVAL);
211
212out_unlock:
213 spin_unlock_irqrestore(sch->lock, flags);
214
215 return ret;
216}
217
218static int eadm_subchannel_probe(struct subchannel *sch)
219{
220 struct eadm_private *private;
221 int ret;
222
223 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
224 if (!private)
225 return -ENOMEM;
226
227 INIT_LIST_HEAD(&private->head);
228 init_timer(&private->timer);
229
230 spin_lock_irq(sch->lock);
231 set_eadm_private(sch, private);
232 private->state = EADM_IDLE;
233 private->sch = sch;
234 sch->isc = EADM_SCH_ISC;
235 ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
236 if (ret) {
237 set_eadm_private(sch, NULL);
238 spin_unlock_irq(sch->lock);
239 kfree(private);
240 goto out;
241 }
242 spin_unlock_irq(sch->lock);
243
244 spin_lock_irq(&list_lock);
245 list_add(&private->head, &eadm_list);
246 spin_unlock_irq(&list_lock);
247
248 if (dev_get_uevent_suppress(&sch->dev)) {
249 dev_set_uevent_suppress(&sch->dev, 0);
250 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
251 }
252out:
253 return ret;
254}
255
256static void eadm_quiesce(struct subchannel *sch)
257{
258 int ret;
259
260 do {
261 spin_lock_irq(sch->lock);
262 ret = cio_disable_subchannel(sch);
263 spin_unlock_irq(sch->lock);
264 } while (ret == -EBUSY);
265}
266
267static int eadm_subchannel_remove(struct subchannel *sch)
268{
269 struct eadm_private *private = get_eadm_private(sch);
270
271 spin_lock_irq(&list_lock);
272 list_del(&private->head);
273 spin_unlock_irq(&list_lock);
274
275 eadm_quiesce(sch);
276
277 spin_lock_irq(sch->lock);
278 set_eadm_private(sch, NULL);
279 spin_unlock_irq(sch->lock);
280
281 kfree(private);
282
283 return 0;
284}
285
286static void eadm_subchannel_shutdown(struct subchannel *sch)
287{
288 eadm_quiesce(sch);
289}
290
291static int eadm_subchannel_freeze(struct subchannel *sch)
292{
293 return cio_disable_subchannel(sch);
294}
295
296static int eadm_subchannel_restore(struct subchannel *sch)
297{
298 return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
299}
300
301/**
302 * eadm_subchannel_sch_event - process subchannel event
303 * @sch: subchannel
304 * @process: non-zero if function is called in process context
305 *
306 * An unspecified event occurred for this subchannel. Adjust data according
307 * to the current operational state of the subchannel. Return zero when the
308 * event has been handled sufficiently or -EAGAIN when this function should
309 * be called again in process context.
310 */
311static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
312{
313 struct eadm_private *private;
314 unsigned long flags;
315 int ret = 0;
316
317 spin_lock_irqsave(sch->lock, flags);
318 if (!device_is_registered(&sch->dev))
319 goto out_unlock;
320
321 if (work_pending(&sch->todo_work))
322 goto out_unlock;
323
324 if (cio_update_schib(sch)) {
325 css_sched_sch_todo(sch, SCH_TODO_UNREG);
326 goto out_unlock;
327 }
328 private = get_eadm_private(sch);
329 if (private->state == EADM_NOT_OPER)
330 private->state = EADM_IDLE;
331
332out_unlock:
333 spin_unlock_irqrestore(sch->lock, flags);
334
335 return ret;
336}
337
338static struct css_device_id eadm_subchannel_ids[] = {
339 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
340 { /* end of list */ },
341};
342MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
343
344static struct css_driver eadm_subchannel_driver = {
345 .drv = {
346 .name = "eadm_subchannel",
347 .owner = THIS_MODULE,
348 },
349 .subchannel_type = eadm_subchannel_ids,
350 .irq = eadm_subchannel_irq,
351 .probe = eadm_subchannel_probe,
352 .remove = eadm_subchannel_remove,
353 .shutdown = eadm_subchannel_shutdown,
354 .sch_event = eadm_subchannel_sch_event,
355 .freeze = eadm_subchannel_freeze,
356 .thaw = eadm_subchannel_restore,
357 .restore = eadm_subchannel_restore,
358};
359
360static struct eadm_ops eadm_ops = {
361 .eadm_start = eadm_start_aob,
362 .owner = THIS_MODULE,
363};
364
365static int __init eadm_sch_init(void)
366{
367 int ret;
368
369 if (!css_general_characteristics.eadm)
370 return -ENXIO;
371
372 eadm_debug = debug_register("eadm_log", 16, 1, 16);
373 if (!eadm_debug)
374 return -ENOMEM;
375
376 debug_register_view(eadm_debug, &debug_hex_ascii_view);
377 debug_set_level(eadm_debug, 2);
378
379 isc_register(EADM_SCH_ISC);
380 ret = css_driver_register(&eadm_subchannel_driver);
381 if (ret)
382 goto cleanup;
383
384 register_eadm_ops(&eadm_ops);
385 return ret;
386
387cleanup:
388 isc_unregister(EADM_SCH_ISC);
389 debug_unregister(eadm_debug);
390 return ret;
391}
392
393static void __exit eadm_sch_exit(void)
394{
395 unregister_eadm_ops(&eadm_ops);
396 css_driver_unregister(&eadm_subchannel_driver);
397 isc_unregister(EADM_SCH_ISC);
398 debug_unregister(eadm_debug);
399}
400module_init(eadm_sch_init);
401module_exit(eadm_sch_exit);
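Inside the deleted eadm driver, eadm_get_idle_sch() picks the first idle subchannel, marks it busy, and rotates it to the list tail so later requests are spread round-robin. A single-threaded userspace model of that selection (no locking, invented types):

    #include <stdio.h>

    enum { IDLE, BUSY };

    struct ent { int id; int state; struct ent *next; };

    static struct ent *get_idle(struct ent **head)
    {
        struct ent **pp, *e, *tail;

        for (pp = head; (e = *pp); pp = &e->next) {
            if (e->state != IDLE)
                continue;
            e->state = BUSY;
            *pp = e->next;               /* unlink the chosen entry */
            for (tail = *head; tail && tail->next; tail = tail->next)
                ;
            e->next = NULL;
            if (tail)
                tail->next = e;          /* move it to the tail */
            else
                *head = e;
            return e;
        }
        return NULL;                     /* all busy: caller gets -EBUSY */
    }

    int main(void)
    {
        struct ent c = { 3, IDLE, NULL };
        struct ent b = { 2, IDLE, &c };
        struct ent a = { 1, IDLE, &b };
        struct ent *head = &a;
        struct ent *e = get_idle(&head);

        printf("picked %d, new head %d\n", e->id, head->id);  /* 1, 2 */
        return 0;
    }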
diff --git a/drivers/s390/cio/eadm_sch.h b/drivers/s390/cio/eadm_sch.h
deleted file mode 100644
index 2779be09398..00000000000
--- a/drivers/s390/cio/eadm_sch.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef EADM_SCH_H
2#define EADM_SCH_H
3
4#include <linux/device.h>
5#include <linux/timer.h>
6#include <linux/list.h>
7#include "orb.h"
8
9struct eadm_private {
10 union orb orb;
11 enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
12 struct timer_list timer;
13 struct list_head head;
14 struct subchannel *sch;
15} __aligned(8);
16
17#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
18#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))
19
20#endif
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 65d13e38803..4d10981c7cc 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Copyright IBM Corp. 2007, 2012 2 * drivers/s390/cio/idset.c
3 *
4 * Copyright IBM Corp. 2007
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 6 */
5 7
6#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
7#include <linux/bitmap.h>
8#include <linux/bitops.h> 9#include <linux/bitops.h>
9#include "idset.h" 10#include "idset.h"
10#include "css.h" 11#include "css.h"
@@ -90,14 +91,6 @@ void idset_sch_del(struct idset *set, struct subchannel_id schid)
90 idset_del(set, schid.ssid, schid.sch_no); 91 idset_del(set, schid.ssid, schid.sch_no);
91} 92}
92 93
93/* Clear ids starting from @schid up to end of subchannel set. */
94void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
95{
96 int pos = schid.ssid * set->num_id + schid.sch_no;
97
98 bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
99}
100
101int idset_sch_contains(struct idset *set, struct subchannel_id schid) 94int idset_sch_contains(struct idset *set, struct subchannel_id schid)
102{ 95{
103 return idset_contains(set, schid.ssid, schid.sch_no); 96 return idset_contains(set, schid.ssid, schid.sch_no);
@@ -120,12 +113,20 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
120 113
121int idset_is_empty(struct idset *set) 114int idset_is_empty(struct idset *set)
122{ 115{
123 return bitmap_empty(set->bitmap, set->num_ssid * set->num_id); 116 int bitnum;
117
118 bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
119 if (bitnum >= set->num_ssid * set->num_id)
120 return 1;
121 return 0;
124} 122}
125 123
126void idset_add_set(struct idset *to, struct idset *from) 124void idset_add_set(struct idset *to, struct idset *from)
127{ 125{
128 int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); 126 unsigned long i, len;
129 127
130 bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); 128 len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
129 __BITOPS_WORDS(from->num_ssid * from->num_id));
130 for (i = 0; i < len ; i++)
131 to->bitmap[i] |= from->bitmap[i];
131} 132}
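idset_add_set() reverts from bitmap_or() to an explicit word-wise OR over the shorter of the two bitmaps. A standalone sketch of that union, with a local __BITOPS_WORDS helper computing how many unsigned longs cover a bit count (the kernel macro has the same meaning):

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define __BITOPS_WORDS(bits) \
        (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static void bitmap_union(unsigned long *to, unsigned long to_bits,
                             const unsigned long *from,
                             unsigned long from_bits)
    {
        unsigned long i, len;

        /* OR only the words both bitmaps actually have */
        len = __BITOPS_WORDS(to_bits) < __BITOPS_WORDS(from_bits) ?
              __BITOPS_WORDS(to_bits) : __BITOPS_WORDS(from_bits);
        for (i = 0; i < len; i++)
            to[i] |= from[i];
    }

    int main(void)
    {
        unsigned long to[2] = { 0x1, 0 }, from[2] = { 0x4, 0x2 };

        bitmap_union(to, 128, from, 128);
        printf("%#lx %#lx\n", to[0], to[1]);    /* 0x5 0x2 */
        return 0;
    }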
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 06d3bc01bb0..7543da4529f 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2007, 2012 2 * drivers/s390/cio/idset.h
3 *
4 * Copyright IBM Corp. 2007
3 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
4 */ 6 */
5 7
@@ -17,7 +19,6 @@ void idset_fill(struct idset *set);
17struct idset *idset_sch_new(void); 19struct idset *idset_sch_new(void);
18void idset_sch_add(struct idset *set, struct subchannel_id id); 20void idset_sch_add(struct idset *set, struct subchannel_id id);
19void idset_sch_del(struct idset *set, struct subchannel_id id); 21void idset_sch_del(struct idset *set, struct subchannel_id id);
20void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
21int idset_sch_contains(struct idset *set, struct subchannel_id id); 22int idset_sch_contains(struct idset *set, struct subchannel_id id);
22int idset_sch_get_first(struct idset *set, struct subchannel_id *id); 23int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
23int idset_is_empty(struct idset *set); 24int idset_is_empty(struct idset *set);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 76253dfcc1b..ba31ad88f4f 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -4,7 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h> 6#include <asm/ccwdev.h>
7#include <asm/irq.h>
8#include "css.h" 7#include "css.h"
9#include "orb.h" 8#include "orb.h"
10 9
@@ -111,9 +110,6 @@ enum cdev_todo {
111 CDEV_TODO_UNREG_EVAL, 110 CDEV_TODO_UNREG_EVAL,
112}; 111};
113 112
114#define FAKE_CMD_IRB 1
115#define FAKE_TM_IRB 2
116
117struct ccw_device_private { 113struct ccw_device_private {
118 struct ccw_device *cdev; 114 struct ccw_device *cdev;
119 struct subchannel *sch; 115 struct subchannel *sch;
@@ -141,7 +137,7 @@ struct ccw_device_private {
141 unsigned int doverify:1; /* delayed path verification */ 137 unsigned int doverify:1; /* delayed path verification */
142 unsigned int donotify:1; /* call notify function */ 138 unsigned int donotify:1; /* call notify function */
143 unsigned int recog_done:1; /* dev. recog. complete */ 139 unsigned int recog_done:1; /* dev. recog. complete */
144 unsigned int fake_irb:2; /* deliver faked irb */ 140 unsigned int fake_irb:1; /* deliver faked irb */
145 unsigned int resuming:1; /* recognition while resume */ 141 unsigned int resuming:1; /* recognition while resume */
146 unsigned int pgroup:1; /* pathgroup is set up */ 142 unsigned int pgroup:1; /* pathgroup is set up */
147 unsigned int mpath:1; /* multipathing is set up */ 143 unsigned int mpath:1; /* multipathing is set up */
@@ -161,7 +157,6 @@ struct ccw_device_private {
161 struct list_head cmb_list; /* list of measured devices */ 157 struct list_head cmb_list; /* list of measured devices */
162 u64 cmb_start_time; /* clock value of cmb reset */ 158 u64 cmb_start_time; /* clock value of cmb reset */
163 void *cmb_wait; /* deferred cmb enable/disable */ 159 void *cmb_wait; /* deferred cmb enable/disable */
164 enum interruption_class int_class;
165}; 160};
166 161
167static inline int rsch(struct subchannel_id schid) 162static inline int rsch(struct subchannel_id schid)
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index 7a640530e7f..45a9865c2b3 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -59,33 +59,9 @@ struct tm_orb {
59 u32:32; 59 u32:32;
60} __packed __aligned(4); 60} __packed __aligned(4);
61 61
62/*
63 * eadm operation request block
64 */
65struct eadm_orb {
66 u32 intparm;
67 u32 key:4;
68 u32:4;
69 u32 compat1:1;
70 u32 compat2:1;
71 u32:21;
72 u32 x:1;
73 u32 aob;
74 u32 css_prio:8;
75 u32:8;
76 u32 scm_prio:8;
77 u32:8;
78 u32:29;
79 u32 fmt:3;
80 u32:32;
81 u32:32;
82 u32:32;
83} __packed __aligned(4);
84
85union orb { 62union orb {
86 struct cmd_orb cmd; 63 struct cmd_orb cmd;
87 struct tm_orb tm; 64 struct tm_orb tm;
88 struct eadm_orb eadm;
89} __packed __aligned(4); 65} __packed __aligned(4);
90 66
91#endif /* S390_ORB_H */ 67#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 5132554d791..e5c966462c5 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Copyright IBM Corp. 2000, 2009 2 * linux/drivers/s390/cio/qdio.h
3 *
4 * Copyright 2000,2009 IBM Corp.
3 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
4 * Jan Glauber <jang@linux.vnet.ibm.com> 6 * Jan Glauber <jang@linux.vnet.ibm.com>
5 */ 7 */
@@ -16,6 +18,14 @@
16#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ 18#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */
17#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ 19#define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */
18 20
21/*
22 * If an asynchronous HiperSockets queue runs full, waiting up to 10 seconds
23 * for the timer to give transmitted skbs back to the stack is too long.
24 * Therefore polling is started once the multicast queue is filled more
25 * than 50 percent.
26 */
27#define QDIO_IQDIO_POLL_LVL 65 /* HS multicast queue */
28
19enum qdio_irq_states { 29enum qdio_irq_states {
20 QDIO_IRQ_STATE_INACTIVE, 30 QDIO_IRQ_STATE_INACTIVE,
21 QDIO_IRQ_STATE_ESTABLISHED, 31 QDIO_IRQ_STATE_ESTABLISHED,
@@ -34,7 +44,6 @@ enum qdio_irq_states {
34#define SLSB_STATE_NOT_INIT 0x0 44#define SLSB_STATE_NOT_INIT 0x0
35#define SLSB_STATE_EMPTY 0x1 45#define SLSB_STATE_EMPTY 0x1
36#define SLSB_STATE_PRIMED 0x2 46#define SLSB_STATE_PRIMED 0x2
37#define SLSB_STATE_PENDING 0x3
38#define SLSB_STATE_HALTED 0xe 47#define SLSB_STATE_HALTED 0xe
39#define SLSB_STATE_ERROR 0xf 48#define SLSB_STATE_ERROR 0xf
40#define SLSB_TYPE_INPUT 0x0 49#define SLSB_TYPE_INPUT 0x0
@@ -58,8 +67,6 @@ enum qdio_irq_states {
58 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */ 67 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
59#define SLSB_P_OUTPUT_EMPTY \ 68#define SLSB_P_OUTPUT_EMPTY \
60 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */ 69 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
61#define SLSB_P_OUTPUT_PENDING \
62 (SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING) /* 0xa3 */
63#define SLSB_CU_OUTPUT_PRIMED \ 70#define SLSB_CU_OUTPUT_PRIMED \
64 (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */ 71 (SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
65#define SLSB_P_OUTPUT_HALTED \ 72#define SLSB_P_OUTPUT_HALTED \
@@ -77,11 +84,19 @@ enum qdio_irq_states {
77#define CHSC_FLAG_QDIO_CAPABILITY 0x80 84#define CHSC_FLAG_QDIO_CAPABILITY 0x80
78#define CHSC_FLAG_VALIDITY 0x40 85#define CHSC_FLAG_VALIDITY 0x40
79 86
87/* qdio adapter-characteristics-1 flag */
88#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
89#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
90#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
91#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
92#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
93#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
94#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
95
80/* SIGA flags */ 96/* SIGA flags */
81#define QDIO_SIGA_WRITE 0x00 97#define QDIO_SIGA_WRITE 0x00
82#define QDIO_SIGA_READ 0x01 98#define QDIO_SIGA_READ 0x01
83#define QDIO_SIGA_SYNC 0x02 99#define QDIO_SIGA_SYNC 0x02
84#define QDIO_SIGA_WRITEQ 0x04
85#define QDIO_SIGA_QEBSM_FLAG 0x80 100#define QDIO_SIGA_QEBSM_FLAG 0x80
86 101
87#ifdef CONFIG_64BIT 102#ifdef CONFIG_64BIT
@@ -238,12 +253,6 @@ struct qdio_input_q {
238struct qdio_output_q { 253struct qdio_output_q {
239 /* PCIs are enabled for the queue */ 254 /* PCIs are enabled for the queue */
240 int pci_out_enabled; 255 int pci_out_enabled;
241 /* cq: use asynchronous output buffers */
242 int use_cq;
243 /* cq: aobs used for a particular SBAL */
244 struct qaob **aobs;
245 /* cq: sbal state related to asynchronous operation */
246 struct qdio_outbuf_state *sbal_state;
247 /* timer to check for more outbound work */ 256 /* timer to check for more outbound work */
248 struct timer_list timer; 257 struct timer_list timer;
249 /* used SBALs before tasklet schedule */ 258 /* used SBALs before tasklet schedule */
@@ -280,9 +289,6 @@ struct qdio_q {
280 /* error condition during a data transfer */ 289 /* error condition during a data transfer */
281 unsigned int qdio_error; 290 unsigned int qdio_error;
282 291
283 /* last scan of the queue */
284 u64 timestamp;
285
286 struct tasklet_struct tasklet; 292 struct tasklet_struct tasklet;
287 struct qdio_queue_perf_stat q_stats; 293 struct qdio_queue_perf_stat q_stats;
288 294
@@ -416,7 +422,20 @@ static inline int multicast_outbound(struct qdio_q *q)
416#define queue_irqs_disabled(q) \ 422#define queue_irqs_disabled(q) \
417 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0) 423 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
418 424
419extern u64 last_ai_time; 425#define TIQDIO_SHARED_IND 63
426
427/* device state change indicators */
428struct indicator_t {
429 u32 ind; /* u32 because of compare-and-swap performance */
430 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
431};
432
433extern struct indicator_t *q_indicators;
434
435static inline int shared_ind(u32 *dsci)
436{
437 return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
438}
420 439
421/* prototypes for thin interrupt */ 440/* prototypes for thin interrupt */
422void qdio_setup_thinint(struct qdio_irq *irq_ptr); 441void qdio_setup_thinint(struct qdio_irq *irq_ptr);
@@ -429,8 +448,6 @@ int tiqdio_allocate_memory(void);
429void tiqdio_free_memory(void); 448void tiqdio_free_memory(void);
430int tiqdio_register_thinints(void); 449int tiqdio_register_thinints(void);
431void tiqdio_unregister_thinints(void); 450void tiqdio_unregister_thinints(void);
432void clear_nonshared_ind(struct qdio_irq *);
433int test_nonshared_ind(struct qdio_irq *);
434 451
435/* prototypes for setup */ 452/* prototypes for setup */
436void qdio_inbound_processing(unsigned long data); 453void qdio_inbound_processing(unsigned long data);
@@ -452,9 +469,6 @@ int qdio_setup_create_sysfs(struct ccw_device *cdev);
452void qdio_setup_destroy_sysfs(struct ccw_device *cdev); 469void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
453int qdio_setup_init(void); 470int qdio_setup_init(void);
454void qdio_setup_exit(void); 471void qdio_setup_exit(void);
455int qdio_enable_async_operation(struct qdio_output_q *q);
456void qdio_disable_async_operation(struct qdio_output_q *q);
457struct qaob *qdio_allocate_aob(void);
458 472
459int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 473int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
460 unsigned char *state); 474 unsigned char *state);
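The qdio.h hunk brings back the indicator array: each queue's dsci points into q_indicators[], and a queue uses the shared device-state-change indicator exactly when that pointer targets slot TIQDIO_SHARED_IND. A standalone model of the check (atomic_t replaced by a plain int for brevity):

    #include <stdio.h>

    #define TIQDIO_SHARED_IND 63

    struct indicator_t {
        unsigned int ind;    /* u32 for compare-and-swap performance */
        int count;           /* use count; 0 or 1 if non-shared */
    };

    static struct indicator_t q_indicators[TIQDIO_SHARED_IND + 1];

    static int shared_ind(unsigned int *dsci)
    {
        return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
    }

    int main(void)
    {
        printf("%d %d\n",
               shared_ind(&q_indicators[TIQDIO_SHARED_IND].ind),  /* 1 */
               shared_ind(&q_indicators[0].ind));                 /* 0 */
        return 0;
    }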
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e6e0d31c02a..0e615cb912d 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright IBM Corp. 2008, 2009 2 * drivers/s390/cio/qdio_debug.c
3 *
4 * Copyright IBM Corp. 2008,2009
3 * 5 *
4 * Author: Jan Glauber (jang@linux.vnet.ibm.com) 6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
5 */ 7 */
6#include <linux/seq_file.h> 8#include <linux/seq_file.h>
7#include <linux/debugfs.h> 9#include <linux/debugfs.h>
8#include <linux/uaccess.h>
9#include <linux/export.h>
10#include <asm/debug.h> 10#include <asm/debug.h>
11#include "qdio_debug.h" 11#include "qdio_debug.h"
12#include "qdio.h" 12#include "qdio.h"
@@ -54,17 +54,15 @@ static int qstat_show(struct seq_file *m, void *v)
54 if (!q) 54 if (!q)
55 return 0; 55 return 0;
56 56
57 seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n", 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
58 q->timestamp, last_ai_time); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
59 seq_printf(m, "nr_used: %d ftc: %d last_move: %d\n", 59 seq_printf(m, "ftc: %d last_move: %d\n",
60 atomic_read(&q->nr_buf_used),
61 q->first_to_check, q->last_move); 60 q->first_to_check, q->last_move);
62 if (q->is_input_q) { 61 if (q->is_input_q) {
63 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
64 q->u.in.polling, q->u.in.ack_start, 63 q->u.in.polling, q->u.in.ack_start,
65 q->u.in.ack_count); 64 q->u.in.ack_count);
66 seq_printf(m, "DSCI: %d IRQs disabled: %u\n", 65 seq_printf(m, "IRQs disabled: %u\n",
67 *(u32 *)q->irq_ptr->dsci,
68 test_bit(QDIO_QUEUE_IRQS_DISABLED, 66 test_bit(QDIO_QUEUE_IRQS_DISABLED,
69 &q->u.in.queue_irq_state)); 67 &q->u.in.queue_irq_state));
70 } 68 }
@@ -78,9 +76,6 @@ static int qstat_show(struct seq_file *m, void *v)
78 case SLSB_P_OUTPUT_NOT_INIT: 76 case SLSB_P_OUTPUT_NOT_INIT:
79 seq_printf(m, "N"); 77 seq_printf(m, "N");
80 break; 78 break;
81 case SLSB_P_OUTPUT_PENDING:
82 seq_printf(m, "P");
83 break;
84 case SLSB_P_INPUT_PRIMED: 79 case SLSB_P_INPUT_PRIMED:
85 case SLSB_CU_OUTPUT_PRIMED: 80 case SLSB_CU_OUTPUT_PRIMED:
86 seq_printf(m, "+"); 81 seq_printf(m, "+");
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 7f8b973da29..5d70bd162ae 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/cio/qdio_debug.h
3 *
2 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008
3 * 5 *
4 * Author: Jan Glauber (jang@linux.vnet.ibm.com) 6 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
@@ -37,14 +39,10 @@ static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
37 debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ 39 debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
38 } while (0) 40 } while (0)
39 41
40static inline void DBF_HEX(void *addr, int len) 42#define DBF_HEX(addr, len) \
41{ 43 do { \
42 while (len > 0) { 44 debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \
43 debug_event(qdio_dbf_setup, DBF_ERR, addr, len); 45 } while (0)
44 len -= qdio_dbf_setup->buf_size;
45 addr += qdio_dbf_setup->buf_size;
46 }
47}
48 46
49#define DBF_ERROR(text...) \ 47#define DBF_ERROR(text...) \
50 do { \ 48 do { \
@@ -53,14 +51,11 @@ static inline void DBF_HEX(void *addr, int len)
53 debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ 51 debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
54 } while (0) 52 } while (0)
55 53
56static inline void DBF_ERROR_HEX(void *addr, int len) 54#define DBF_ERROR_HEX(addr, len) \
57{ 55 do { \
58 while (len > 0) { 56 debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \
59 debug_event(qdio_dbf_error, DBF_ERR, addr, len); 57 } while (0)
60 len -= qdio_dbf_error->buf_size; 58
61 addr += qdio_dbf_error->buf_size;
62 }
63}
64 59
65#define DBF_DEV_EVENT(level, device, text...) \ 60#define DBF_DEV_EVENT(level, device, text...) \
66 do { \ 61 do { \
@@ -71,15 +66,10 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
71 } \ 66 } \
72 } while (0) 67 } while (0)
73 68
74static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr, 69#define DBF_DEV_HEX(level, device, addr, len) \
75 int len, int level) 70 do { \
76{ 71 debug_event(device->debug_area, level, (void*)(addr), len); \
77 while (len > 0) { 72 } while (0)
78 debug_event(dev->debug_area, level, addr, len);
79 len -= dev->debug_area->buf_size;
80 addr += dev->debug_area->buf_size;
81 }
82}
83 73
84void qdio_allocate_dbf(struct qdio_initialize *init_data, 74void qdio_allocate_dbf(struct qdio_initialize *init_data,
85 struct qdio_irq *irq_ptr); 75 struct qdio_irq *irq_ptr);
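The removed inline DBF_HEX() helpers chunk a buffer into debug-entry-sized pieces so that buffers larger than one entry are logged completely; the macros they are replaced with emit a single debug_event() and so truncate long buffers. A standalone model of the chunking loop (printf standing in for debug_event, entry size invented):

    #include <stdio.h>

    #define BUF_SIZE 16    /* stand-in for the debug area's entry size */

    /* Stand-in for debug_event(): logs at most one entry's worth. */
    static void log_event(const void *data, int len)
    {
        printf("entry: %.*s\n", len > BUF_SIZE ? BUF_SIZE : len,
               (const char *)data);
    }

    static void log_hex_chunked(const void *addr, int len)
    {
        const char *p = addr;

        while (len > 0) {    /* one debug entry per chunk */
            log_event(p, len);
            len -= BUF_SIZE;
            p += BUF_SIZE;
        }
    }

    int main(void)
    {
        log_hex_chunked("0123456789abcdef0123456789abcdef!", 33);
        return 0;
    }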
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 1671d3461f2..288c9140290 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/cio/qdio_main.c
3 *
2 * Linux for s390 qdio support, buffer handling, qdio API and module support. 4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
3 * 5 *
4 * Copyright IBM Corp. 2000, 2008 6 * Copyright 2000,2008 IBM Corp.
5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
6 * Jan Glauber <jang@linux.vnet.ibm.com> 8 * Jan Glauber <jang@linux.vnet.ibm.com>
7 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -12,11 +14,10 @@
12#include <linux/timer.h> 14#include <linux/timer.h>
13#include <linux/delay.h> 15#include <linux/delay.h>
14#include <linux/gfp.h> 16#include <linux/gfp.h>
15#include <linux/io.h> 17#include <linux/kernel_stat.h>
16#include <linux/atomic.h> 18#include <linux/atomic.h>
17#include <asm/debug.h> 19#include <asm/debug.h>
18#include <asm/qdio.h> 20#include <asm/qdio.h>
19#include <asm/ipl.h>
20 21
21#include "cio.h" 22#include "cio.h"
22#include "css.h" 23#include "css.h"
@@ -61,7 +62,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
61 " ipm %0\n" 62 " ipm %0\n"
62 " srl %0,28\n" 63 " srl %0,28\n"
63 : "=d" (cc) 64 : "=d" (cc)
64 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc"); 65 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
65 return cc; 66 return cc;
66} 67}
67 68
@@ -72,27 +73,26 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
  * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
  * @fc: function code to perform
  *
- * Returns condition code.
+ * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
  * Note: For IQDC unicast queues only the highest priority queue is processed.
  */
 static inline int do_siga_output(unsigned long schid, unsigned long mask,
-				 unsigned int *bb, unsigned int fc,
-				 unsigned long aob)
+				 unsigned int *bb, unsigned int fc)
 {
 	register unsigned long __fc asm("0") = fc;
 	register unsigned long __schid asm("1") = schid;
 	register unsigned long __mask asm("2") = mask;
-	register unsigned long __aob asm("3") = aob;
-	int cc;
+	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
 
 	asm volatile(
 		"	siga	0\n"
-		"	ipm	%0\n"
+		"0:	ipm	%0\n"
 		"	srl	%0,28\n"
-		: "=d" (cc), "+d" (__fc), "+d" (__aob)
-		: "d" (__schid), "d" (__mask)
-		: "cc");
-	*bb = __fc >> 31;
+		"1:\n"
+		EX_TABLE(0b, 1b)
+		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
+		: : "cc", "memory");
+	*bb = ((unsigned int) __fc) >> 31;
 	return cc;
 }
 
@@ -101,12 +101,9 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 	/* all done or next buffer state different */
 	if (ccq == 0 || ccq == 32)
 		return 0;
-	/* no buffer processed */
-	if (ccq == 97)
-		return 1;
 	/* not all buffers processed */
-	if (ccq == 96)
-		return 2;
+	if (ccq == 96 || ccq == 97)
+		return 1;
 	/* notify devices immediately */
 	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
 	return -EIO;
@@ -126,9 +123,12 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 			int start, int count, int auto_ack)
 {
-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
 	unsigned int ccq = 0;
+	int tmp_count = count, tmp_start = start;
+	int nr = q->nr;
+	int rc;
 
+	BUG_ON(!q->irq_ptr->sch_token);
 	qperf_inc(q, eqbs);
 
 	if (!q->is_input_q)
@@ -137,33 +137,29 @@ again:
 	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
 		      auto_ack);
 	rc = qdio_check_ccq(q, ccq);
-	if (!rc)
-		return count - tmp_count;
+
+	/* At least one buffer was processed, return and extract the remaining
+	 * buffers later.
+	 */
+	if ((ccq == 96) && (count != tmp_count)) {
+		qperf_inc(q, eqbs_partial);
+		return (count - tmp_count);
+	}
 
 	if (rc == 1) {
 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
 		goto again;
 	}
 
-	if (rc == 2) {
-		qperf_inc(q, eqbs_partial);
-		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
-			tmp_count);
-		/*
-		 * Retry once, if that fails bail out and process the
-		 * extracted buffers before trying again.
-		 */
-		if (!retried++)
-			goto again;
-		else
-			return count - tmp_count;
+	if (rc < 0) {
+		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev,
+			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+			   0, -1, -1, q->irq_ptr->int_parm);
+		return 0;
 	}
-
-	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
-	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
-		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
-	return 0;
+	return count - tmp_count;
 }
 
 /**
@@ -187,6 +183,8 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 
 	if (!count)
 		return 0;
+
+	BUG_ON(!q->irq_ptr->sch_token);
 	qperf_inc(q, sqbs);
 
 	if (!q->is_input_q)
@@ -194,44 +192,41 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 again:
 	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
 	rc = qdio_check_ccq(q, ccq);
-	if (!rc) {
-		WARN_ON_ONCE(tmp_count);
-		return count - tmp_count;
-	}
-
-	if (rc == 1 || rc == 2) {
+	if (rc == 1) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
 		qperf_inc(q, sqbs_partial);
 		goto again;
 	}
-
-	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
-	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
-		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
-	return 0;
+	if (rc < 0) {
+		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
+		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
+		q->handler(q->irq_ptr->cdev,
+			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+			   0, -1, -1, q->irq_ptr->int_parm);
+		return 0;
+	}
+	WARN_ON(tmp_count);
+	return count - tmp_count;
 }
 
 /* returns number of examined buffers and their common state in *state */
 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 				 unsigned char *state, unsigned int count,
-				 int auto_ack, int merge_pending)
+				 int auto_ack)
 {
 	unsigned char __state = 0;
 	int i;
 
+	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
 	if (is_qebsm(q))
 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 
 	for (i = 0; i < count; i++) {
-		if (!__state) {
+		if (!__state)
 			__state = q->slsb.val[bufnr];
-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
-				__state = SLSB_P_OUTPUT_EMPTY;
-		} else if (merge_pending) {
-			if ((q->slsb.val[bufnr] & __state) != __state)
-				break;
-		} else if (q->slsb.val[bufnr] != __state)
+		else if (q->slsb.val[bufnr] != __state)
 			break;
 		bufnr = next_buf(bufnr);
 	}
@@ -242,7 +237,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 				unsigned char *state, int auto_ack)
 {
-	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
+	return get_buf_states(q, bufnr, state, 1, auto_ack);
 }
 
 /* wrap-around safe setting of slsb states, returns number of changed buffers */
@@ -251,6 +246,9 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
 {
 	int i;
 
+	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
+	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
+
 	if (is_qebsm(q))
 		return qdio_do_sqbs(q, state, bufnr, count);
 
@@ -268,7 +266,7 @@ static inline int set_buf_state(struct qdio_q *q, int bufnr,
 }
 
 /* set slsb states to initial state */
-static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
+void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
 	int i;
@@ -299,7 +297,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 	cc = do_siga_sync(schid, output, input, fc);
 	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
-	return (cc) ? -EIO : 0;
+	return cc;
 }
 
 static inline int qdio_siga_sync_q(struct qdio_q *q)
@@ -310,31 +308,23 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
 	return qdio_siga_sync(q, q->mask, 0);
 }
 
-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
-			    unsigned long aob)
+static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 {
 	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 	unsigned int fc = QDIO_SIGA_WRITE;
 	u64 start_time = 0;
 	int retries = 0, cc;
-	unsigned long laob = 0;
-
-	if (q->u.out.use_cq && aob != 0) {
-		fc = QDIO_SIGA_WRITEQ;
-		laob = aob;
-	}
 
 	if (is_qebsm(q)) {
 		schid = q->irq_ptr->sch_token;
 		fc |= QDIO_SIGA_QEBSM_FLAG;
 	}
 again:
-	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
-		(aob && fc != QDIO_SIGA_WRITEQ));
-	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
+	cc = do_siga_output(schid, q->mask, busy_bit, fc);
 
 	/* hipersocket busy condition */
 	if (unlikely(*busy_bit)) {
+		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
 		retries++;
 
 		if (!start_time) {
@@ -369,7 +359,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	cc = do_siga_input(schid, q->mask, fc);
 	if (unlikely(cc))
 		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
-	return (cc) ? -EIO : 0;
+	return cc;
 }
 
 #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
@@ -389,7 +379,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 {
 	if (need_siga_sync(q))
 		qdio_siga_sync_q(q);
-	return get_buf_states(q, bufnr, state, 1, 0, 0);
+	return get_buf_states(q, bufnr, state, 1, 0);
 }
 
 static inline void qdio_stop_polling(struct qdio_q *q)
@@ -428,7 +418,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
 	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
 					SLSB_P_OUTPUT_NOT_INIT;
 
-	q->qdio_error = QDIO_ERROR_SLSB_STATE;
+	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
 
 	/* special handling for no target buffer empty */
 	if ((!q->is_input_q &&
@@ -436,7 +426,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
 		qperf_inc(q, target_full);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
 			      q->first_to_check);
-		goto set;
+		return;
 	}
 
 	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
@@ -446,7 +436,6 @@ static void process_buffer_error(struct qdio_q *q, int count)
 	       q->sbal[q->first_to_check]->element[14].sflags,
 	       q->sbal[q->first_to_check]->element[15].sflags);
 
-set:
 	/*
 	 * Interrupts may be avoided as long as the error is present
 	 * so change the buffer state immediately to avoid starvation.
@@ -504,8 +493,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
-
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
 	 * would return 0.
@@ -520,7 +507,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	 * No siga sync here, as a PCI or we after a thin interrupt
 	 * already sync'ed the queues.
 	 */
-	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
+	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 	if (!count)
 		goto out;
 
@@ -548,7 +535,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		BUG();
 	}
 out:
 	return q->first_to_check;
@@ -560,7 +547,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 
 	bufnr = get_inbound_buffer_frontier(q);
 
-	if (bufnr != q->last_move) {
+	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
 		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
 			q->u.in.timestamp = get_clock();
@@ -603,104 +590,6 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 		return 0;
 }
 
-static inline int contains_aobs(struct qdio_q *q)
-{
-	return !q->is_input_q && q->u.out.use_cq;
-}
-
-static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
-				  int i, struct qaob *aob)
-{
-	int tmp;
-
-	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
-		      (unsigned long) virt_to_phys(aob));
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
-		      (unsigned long) aob->res0[0]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
-		      (unsigned long) aob->res0[1]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
-		      (unsigned long) aob->res0[2]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
-		      (unsigned long) aob->res0[3]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
-		      (unsigned long) aob->res0[4]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
-		      (unsigned long) aob->res0[5]);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
-	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
-	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
-	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
-	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
-	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
-	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
-			      (unsigned long) aob->sba[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
-			      (unsigned long) q->sbal[i]->element[tmp].addr);
-		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
-		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
-			      q->sbal[i]->element[tmp].length);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
-	for (tmp = 0; tmp < 2; ++tmp) {
-		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
-			      (unsigned long) aob->res4[tmp]);
-	}
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
-	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
-}
-
-static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
-{
-	unsigned char state = 0;
-	int j, b = start;
-
-	if (!contains_aobs(q))
-		return;
-
-	for (j = 0; j < count; ++j) {
-		get_buf_state(q, b, &state, 0);
-		if (state == SLSB_P_OUTPUT_PENDING) {
-			struct qaob *aob = q->u.out.aobs[b];
-			if (aob == NULL)
-				continue;
-
-			q->u.out.sbal_state[b].flags |=
-				QDIO_OUTBUF_STATE_FLAG_PENDING;
-			q->u.out.aobs[b] = NULL;
-		} else if (state == SLSB_P_OUTPUT_EMPTY) {
-			q->u.out.sbal_state[b].aob = NULL;
-		}
-		b = next_buf(b);
-	}
-}
-
-static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
-					int bufnr)
-{
-	unsigned long phys_aob = 0;
-
-	if (!q->use_cq)
-		goto out;
-
-	if (!q->aobs[bufnr]) {
-		struct qaob *aob = qdio_allocate_aob();
-		q->aobs[bufnr] = aob;
-	}
-	if (q->aobs[bufnr]) {
-		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
-		q->sbal_state[bufnr].aob = q->aobs[bufnr];
-		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
-		phys_aob = virt_to_phys(q->aobs[bufnr]);
-		WARN_ON_ONCE(phys_aob & 0xFF);
-	}
-
-out:
-	return phys_aob;
-}
-
 static void qdio_kick_handler(struct qdio_q *q)
 {
 	int start = q->first_to_kick;
@@ -721,8 +610,6 @@ static void qdio_kick_handler(struct qdio_q *q)
 			      start, count);
 	}
 
-	qdio_handle_aobs(q, start, count);
-
 	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 		   q->irq_ptr->int_parm);
 
@@ -772,8 +659,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_clock();
-
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
 		    !pci_out_supported(q)) ||
@@ -787,24 +672,23 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	 */
 	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 	stop = add_buf(q->first_to_check, count);
+
 	if (q->first_to_check == stop)
-		goto out;
+		return q->first_to_check;
 
-	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
+	count = get_buf_states(q, q->first_to_check, &state, count, 0);
 	if (!count)
-		goto out;
+		return q->first_to_check;
 
 	switch (state) {
 	case SLSB_P_OUTPUT_EMPTY:
 		/* the adapter got it */
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
-			"out empty:%1d %02x", q->nr, count);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
 		if (q->irq_ptr->perf_stat_enabled)
 			account_sbals(q, count);
-
 		break;
 	case SLSB_P_OUTPUT_ERROR:
 		process_buffer_error(q, count);
@@ -817,17 +701,14 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 		/* the adapter has not fetched the output yet */
 		if (q->irq_ptr->perf_stat_enabled)
 			q->q_stats.nr_sbal_nop++;
-		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
-			      q->nr);
+		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
 		break;
 	case SLSB_P_OUTPUT_NOT_INIT:
 	case SLSB_P_OUTPUT_HALTED:
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		BUG();
 	}
-
-out:
 	return q->first_to_check;
 }
 
@@ -843,7 +724,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
 
 	bufnr = get_outbound_buffer_frontier(q);
 
-	if (bufnr != q->last_move) {
+	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
 		return 1;
@@ -851,7 +732,7 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q)
 	return 0;
 }
 
-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
+static int qdio_kick_outbound_q(struct qdio_q *q)
 {
 	int retries = 0, cc;
 	unsigned int busy_bit;
@@ -863,7 +744,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 retry:
 	qperf_inc(q, siga_write);
 
-	cc = qdio_siga_output(q, &busy_bit, aob);
+	cc = qdio_siga_output(q, &busy_bit);
 	switch (cc) {
 	case 0:
 		break;
@@ -874,16 +755,13 @@ retry:
 				goto retry;
 			}
 			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
-			cc = -EBUSY;
-		} else {
+			cc |= QDIO_ERROR_SIGA_BUSY;
+		} else
 			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
-			cc = -ENOBUFS;
-		}
 		break;
 	case 1:
 	case 3:
 		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
-		cc = -EIO;
 		break;
 	}
 	if (retries) {
@@ -896,7 +774,7 @@ retry:
 static void __qdio_outbound_processing(struct qdio_q *q)
 {
 	qperf_inc(q, tasklet_outbound);
-	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
+	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
 
 	if (qdio_outbound_q_moved(q))
 		qdio_kick_handler(q);
@@ -905,13 +783,21 @@ static void __qdio_outbound_processing(struct qdio_q *q)
 	if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
 		goto sched;
 
+	/* bail out for HiperSockets unicast queues */
+	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
+		return;
+
+	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
+	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
+		goto sched;
+
 	if (q->u.out.pci_out_enabled)
 		return;
 
 	/*
 	 * Now we know that queue type is either qeth without pci enabled
-	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
-	 * is noticed and outbound_handler is called after some time.
+	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
+	 * EMPTY is noticed and outbound_handler is called after some time.
 	 */
 	if (qdio_outbound_q_done(q))
 		del_timer(&q->u.out.timer);
@@ -1035,9 +921,8 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 			}
 			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
 						 q->irq_ptr->int_parm);
-		} else {
+		} else
 			tasklet_schedule(&q->tasklet);
-		}
 	}
 
 	if (!pci_out_supported(q))
@@ -1057,7 +942,6 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
 {
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	struct qdio_q *q;
-	int count;
 
 	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
 	DBF_ERROR("intp :%lx", intparm);
@@ -1071,17 +955,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
 		dump_stack();
 		goto no_handler;
 	}
-
-	count = sub_buf(q->first_to_check, q->first_to_kick);
-	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
-		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
+	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
+		   0, -1, -1, irq_ptr->int_parm);
 no_handler:
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
-	/*
-	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
-	 * Therefore we call the LGR detection function here.
-	 */
-	lgr_info_log();
 }
 
 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
@@ -1118,14 +995,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		return;
 	}
 
+	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
 	if (irq_ptr->perf_stat_enabled)
 		irq_ptr->perf_stat.qdio_int++;
 
 	if (IS_ERR(irb)) {
-		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
-		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
-		wake_up(&cdev->private->wait_q);
-		return;
+		switch (PTR_ERR(irb)) {
+		case -EIO:
+			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
+			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+			wake_up(&cdev->private->wait_q);
+			return;
+		default:
+			WARN_ON(1);
+			return;
+		}
 	}
 	qdio_irq_check_sense(irq_ptr, irb);
 	cstat = irb->scsw.cmd.cstat;
@@ -1151,7 +1035,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	case QDIO_IRQ_STATE_STOPPED:
 		break;
 	default:
-		WARN_ON_ONCE(1);
+		WARN_ON(1);
 	}
 	wake_up(&cdev->private->wait_q);
 }
@@ -1205,7 +1089,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
 	if (!irq_ptr)
 		return -ENODEV;
 
-	WARN_ON_ONCE(irqs_disabled());
+	BUG_ON(irqs_disabled());
 	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
 
 	mutex_lock(&irq_ptr->setup_mutex);
@@ -1336,6 +1220,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
 	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!irq_ptr->qdr)
 		goto out_rel;
+	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
 
 	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
 			     init_data->no_output_qs))
@@ -1351,26 +1236,6 @@ out_err:
 }
 EXPORT_SYMBOL_GPL(qdio_allocate);
 
-static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
-{
-	struct qdio_q *q = irq_ptr->input_qs[0];
-	int i, use_cq = 0;
-
-	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
-		use_cq = 1;
-
-	for_each_output_queue(irq_ptr, q, i) {
-		if (use_cq) {
-			if (qdio_enable_async_operation(&q->u.out) < 0) {
-				use_cq = 0;
-				continue;
-			}
-		} else
-			qdio_disable_async_operation(&q->u.out);
-	}
-	DBF_EVENT("use_cq:%d", use_cq);
-}
-
 /**
  * qdio_establish - establish queues on a qdio subchannel
  * @init_data: initialization data
@@ -1434,8 +1299,7 @@ int qdio_establish(struct qdio_initialize *init_data)
 	}
 
 	qdio_setup_ssqd_info(irq_ptr);
-
-	qdio_detect_hsicq(irq_ptr);
+	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
 
 	/* qebsm is now setup if available, initialize buffer states */
 	qdio_init_buf_states(irq_ptr);
@@ -1574,11 +1438,16 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 
 set:
 	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
+
 	used = atomic_add_return(count, &q->nr_buf_used) - count;
+	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
+
+	/* no need to signal as long as the adapter had free buffers */
+	if (used)
+		return 0;
 
 	if (need_siga_in(q))
 		return qdio_siga_input(q);
-
 	return 0;
 }
1584 1453
@@ -1599,6 +1468,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1599 1468
1600 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1469 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1601 used = atomic_add_return(count, &q->nr_buf_used); 1470 used = atomic_add_return(count, &q->nr_buf_used);
1471 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1602 1472
1603 if (used == QDIO_MAX_BUFFERS_PER_Q) 1473 if (used == QDIO_MAX_BUFFERS_PER_Q)
1604 qperf_inc(q, outbound_queue_full); 1474 qperf_inc(q, outbound_queue_full);
@@ -1610,21 +1480,17 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	q->u.out.pci_out_enabled = 0;
 
 	if (queue_type(q) == QDIO_IQDIO_QFMT) {
-		unsigned long phys_aob = 0;
-
-		/* One SIGA-W per buffer required for unicast HSI */
+		/* One SIGA-W per buffer required for unicast HiperSockets. */
 		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
 
-		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
-
-		rc = qdio_kick_outbound_q(q, phys_aob);
+		rc = qdio_kick_outbound_q(q);
 	} else if (need_siga_sync(q)) {
 		rc = qdio_siga_sync_q(q);
 	} else {
 		/* try to fast requeue buffers */
 		get_buf_state(q, prev_buf(bufnr), &state, 0);
 		if (state != SLSB_CU_OUTPUT_PRIMED)
-			rc = qdio_kick_outbound_q(q, 0);
+			rc = qdio_kick_outbound_q(q);
 		else
 			qperf_inc(q, fast_requeue);
 	}
@@ -1663,7 +1529,7 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1663 "do%02x b:%02x c:%02x", callflags, bufnr, count); 1529 "do%02x b:%02x c:%02x", callflags, bufnr, count);
1664 1530
1665 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1531 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1666 return -EIO; 1532 return -EBUSY;
1667 if (!count) 1533 if (!count)
1668 return 0; 1534 return 0;
1669 if (callflags & QDIO_FLAG_SYNC_INPUT) 1535 if (callflags & QDIO_FLAG_SYNC_INPUT)
@@ -1694,7 +1560,11 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 		return -ENODEV;
 	q = irq_ptr->input_qs[nr];
 
-	clear_nonshared_ind(irq_ptr);
+	WARN_ON(queue_irqs_enabled(q));
+
+	if (!shared_ind(q->irq_ptr->dsci))
+		xchg(q->irq_ptr->dsci, 0);
+
 	qdio_stop_polling(q);
 	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
 
@@ -1702,7 +1572,7 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (test_nonshared_ind(irq_ptr))
+	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
 		goto rescan;
 	if (!qdio_inbound_q_done(q))
 		goto rescan;
@@ -1740,6 +1610,7 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
 	if (!irq_ptr)
 		return -ENODEV;
 	q = irq_ptr->input_qs[nr];
+	WARN_ON(queue_irqs_enabled(q));
 
 	/*
 	 * Cannot rely on automatic sync after interrupt since queues may
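Side note on the qdio_main.c changes above: get_buf_states() scans the 128-entry ring of per-buffer state bytes (the SLSB) and returns the length of the run of buffers that share the first buffer's state, wrapping at the end of the ring via next_buf(). A self-contained toy model of that scan follows; the names ring, NR_BUFS and scan_states() are illustrative only.

#include <stdio.h>

#define NR_BUFS 128			/* a QDIO queue has 128 SBALs */
#define BUF_MASK (NR_BUFS - 1)

static unsigned char ring[NR_BUFS];	/* per-buffer state bytes */

/* returns the run length; the common state is stored via *state */
static int scan_states(int start, int count, unsigned char *state)
{
	unsigned char first = ring[start & BUF_MASK];
	int i;

	for (i = 0; i < count; i++)
		if (ring[(start + i) & BUF_MASK] != first)
			break;
	*state = first;
	return i;
}

int main(void)
{
	unsigned char state;

	ring[126] = ring[127] = ring[0] = 1;	/* run crosses the wrap */
	printf("run=%d state=%u\n", scan_states(126, 5, &state), state);
	return 0;	/* prints run=3 state=1 */
}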
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 16ecd35b8e5..89107d0938c 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,12 +1,13 @@
 /*
+ * driver/s390/cio/qdio_setup.c
+ *
  * qdio queue initialization
  *
- * Copyright IBM Corp. 2008
+ * Copyright (C) IBM Corp. 2008
  * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include <linux/export.h>
 #include <asm/qdio.h>
 
 #include "cio.h"
@@ -18,19 +19,6 @@
 #include "qdio_debug.h"
 
 static struct kmem_cache *qdio_q_cache;
-static struct kmem_cache *qdio_aob_cache;
-
-struct qaob *qdio_allocate_aob(void)
-{
-	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(qdio_allocate_aob);
-
-void qdio_release_aob(struct qaob *aob)
-{
-	kmem_cache_free(qdio_aob_cache, aob);
-}
-EXPORT_SYMBOL_GPL(qdio_release_aob);
 
 /*
  * qebsm is only available under 64bit but the adapter sets the feature
@@ -140,8 +128,10 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
 	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
 
 	/* fill in sbal */
-	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
+	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
 		q->sbal[j] = *sbals_array++;
+		BUG_ON((unsigned long)q->sbal[j] & 0xff);
+	}
 
 	/* fill in slib */
 	if (i > 0) {
@@ -164,37 +154,29 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 	struct qdio_q *q;
 	void **input_sbal_array = qdio_init->input_sbal_addr_array;
 	void **output_sbal_array = qdio_init->output_sbal_addr_array;
-	struct qdio_outbuf_state *output_sbal_state_array =
-				  qdio_init->output_sbal_state_array;
 	int i;
 
 	for_each_input_queue(irq_ptr, q, i) {
-		DBF_EVENT("inq:%1d", i);
+		DBF_EVENT("in-q:%1d", i);
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
-		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
-				qdio_init->queue_start_poll_array[i] : NULL;
-
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll;
 		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
 		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
-		if (is_thinint_irq(irq_ptr)) {
+		if (is_thinint_irq(irq_ptr))
 			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
 				     (unsigned long) q);
-		} else {
+		else
 			tasklet_init(&q->tasklet, qdio_inbound_processing,
 				     (unsigned long) q);
-		}
 	}
 
 	for_each_output_queue(irq_ptr, q, i) {
 		DBF_EVENT("outq:%1d", i);
 		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
 
-		q->u.out.sbal_state = output_sbal_state_array;
-		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
-
 		q->is_input_q = 0;
 		q->u.out.scan_threshold = qdio_init->scan_threshold;
 		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
@@ -307,8 +289,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
 
 	check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
 	process_ac_flags(irq_ptr, qdioac);
-	DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
-	DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
+	DBF_EVENT("qdioac:%4x", qdioac);
 }
 
 void qdio_release_memory(struct qdio_irq *irq_ptr)
@@ -330,19 +311,6 @@ void qdio_release_memory(struct qdio_irq *irq_ptr)
 	for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
 		q = irq_ptr->output_qs[i];
 		if (q) {
-			if (q->u.out.use_cq) {
-				int n;
-
-				for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
-					struct qaob *aob = q->u.out.aobs[n];
-					if (aob) {
-						qdio_release_aob(aob);
-						q->u.out.aobs[n] = NULL;
-					}
-				}
-
-				qdio_disable_async_operation(&q->u.out);
-			}
 			free_page((unsigned long) q->slib);
 			kmem_cache_free(qdio_q_cache, q);
 		}
@@ -377,7 +345,6 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
 	int i;
 
 	irq_ptr->qdr->qfmt = qdio_init->q_format;
-	irq_ptr->qdr->ac = qdio_init->qdr_ac;
 	irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
 	irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
 	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
@@ -432,8 +399,9 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	irq_ptr->int_parm = init_data->int_parm;
 	irq_ptr->nr_input_qs = init_data->no_input_qs;
 	irq_ptr->nr_output_qs = init_data->no_output_qs;
+
+	irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
 	irq_ptr->cdev = init_data->cdev;
-	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
 	setup_queues(irq_ptr, init_data);
 
 	setup_qib(irq_ptr, init_data);
@@ -480,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
 	char s[80];
 
 	snprintf(s, 80, "qdio: %s %s on SC %x using "
-		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
+		 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
 		 dev_name(&cdev->dev),
 		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
 		 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -497,60 +465,23 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
 	printk(KERN_INFO "%s", s);
 }
 
-int qdio_enable_async_operation(struct qdio_output_q *outq)
-{
-	outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
-			     GFP_ATOMIC);
-	if (!outq->aobs) {
-		outq->use_cq = 0;
-		return -ENOMEM;
-	}
-	outq->use_cq = 1;
-	return 0;
-}
-
-void qdio_disable_async_operation(struct qdio_output_q *q)
-{
-	kfree(q->aobs);
-	q->aobs = NULL;
-	q->use_cq = 0;
-}
-
 int __init qdio_setup_init(void)
 {
-	int rc;
-
 	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
 					 256, 0, NULL);
 	if (!qdio_q_cache)
 		return -ENOMEM;
 
-	qdio_aob_cache = kmem_cache_create("qdio_aob",
-					sizeof(struct qaob),
-					sizeof(struct qaob),
-					0,
-					NULL);
-	if (!qdio_aob_cache) {
-		rc = -ENOMEM;
-		goto free_qdio_q_cache;
-	}
-
 	/* Check for OSA/FCP thin interrupts (bit 67). */
 	DBF_EVENT("thinint:%1d",
 		  (css_general_characteristics.aif_osa) ? 1 : 0);
 
 	/* Check for QEBSM support in general (bit 58). */
 	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
-	rc = 0;
-out:
-	return rc;
-free_qdio_q_cache:
-	kmem_cache_destroy(qdio_q_cache);
-	goto out;
+	return 0;
 }
 
 void qdio_setup_exit(void)
 {
-	kmem_cache_destroy(qdio_aob_cache);
 	kmem_cache_destroy(qdio_q_cache);
 }
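The removed two-cache variant of qdio_setup_init() above follows the usual kernel goto-based cleanup ladder: acquire resources in order and, on a later failure, release only what was already set up, in reverse order, through a single exit path. A stand-alone sketch of the same shape, with plain malloc() standing in for kmem_cache_create() and illustrative names:

#include <stdlib.h>

static void *res_a;
static void *res_b;

static int setup(void)
{
	res_a = malloc(64);
	if (!res_a)
		return -1;		/* nothing to unwind yet */

	res_b = malloc(64);
	if (!res_b)
		goto free_a;		/* unwind in reverse order */
	return 0;

free_a:
	free(res_a);
	res_a = NULL;
	return -1;
}

static void teardown(void)
{
	free(res_b);			/* reverse of setup() order */
	free(res_a);
}

int main(void)
{
	if (setup() == 0)
		teardown();
	return 0;
}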
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index bde5255200d..2a1d4dfaf85 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,5 +1,7 @@
 /*
- * Copyright IBM Corp. 2000, 2009
+ * linux/drivers/s390/cio/thinint_qdio.c
+ *
+ * Copyright 2000,2009 IBM Corp.
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
  *	      Cornelia Huck <cornelia.huck@de.ibm.com>
  *	      Jan Glauber <jang@linux.vnet.ibm.com>
@@ -24,24 +26,17 @@
  */
 #define TIQDIO_NR_NONSHARED_IND		63
 #define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
-#define TIQDIO_SHARED_IND		63
-
-/* device state change indicators */
-struct indicator_t {
-	u32 ind;	/* u32 because of compare-and-swap performance */
-	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
-};
 
 /* list of thin interrupt input queues */
 static LIST_HEAD(tiq_list);
-static DEFINE_MUTEX(tiq_list_lock);
+DEFINE_MUTEX(tiq_list_lock);
 
 /* adapter local summary indicator */
 static u8 *tiqdio_alsi;
 
-static struct indicator_t *q_indicators;
+struct indicator_t *q_indicators;
 
-u64 last_ai_time;
+static u64 last_ai_time;
 
 /* returns addr for the device state change indicator */
 static u32 *get_indicator(void)
@@ -72,8 +67,12 @@ static void put_indicator(u32 *addr)
 
 void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 {
+	struct qdio_q *q;
+	int i;
+
 	mutex_lock(&tiq_list_lock);
-	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
+	for_each_input_queue(irq_ptr, q, i)
+		list_add_rcu(&q->entry, &tiq_list);
 	mutex_unlock(&tiq_list_lock);
 	xchg(irq_ptr->dsci, 1 << 7);
 }
@@ -81,53 +80,19 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 {
 	struct qdio_q *q;
+	int i;
 
-	q = irq_ptr->input_qs[0];
-	/* if establish triggered an error */
-	if (!q || !q->entry.prev || !q->entry.next)
-		return;
+	for (i = 0; i < irq_ptr->nr_input_qs; i++) {
+		q = irq_ptr->input_qs[i];
+		/* if establish triggered an error */
+		if (!q || !q->entry.prev || !q->entry.next)
+			continue;
 
-	mutex_lock(&tiq_list_lock);
-	list_del_rcu(&q->entry);
-	mutex_unlock(&tiq_list_lock);
-	synchronize_rcu();
-}
-
-static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
-{
-	return irq_ptr->nr_input_qs > 1;
-}
-
-static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
-{
-	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
-}
-
-static inline int shared_ind(struct qdio_irq *irq_ptr)
-{
-	return references_shared_dsci(irq_ptr) ||
-		has_multiple_inq_on_dsci(irq_ptr);
-}
-
-void clear_nonshared_ind(struct qdio_irq *irq_ptr)
-{
-	if (!is_thinint_irq(irq_ptr))
-		return;
-	if (shared_ind(irq_ptr))
-		return;
-	xchg(irq_ptr->dsci, 0);
-}
-
-int test_nonshared_ind(struct qdio_irq *irq_ptr)
-{
-	if (!is_thinint_irq(irq_ptr))
-		return 0;
-	if (shared_ind(irq_ptr))
-		return 0;
-	if (*irq_ptr->dsci)
-		return 1;
-	else
-		return 0;
+		mutex_lock(&tiq_list_lock);
+		list_del_rcu(&q->entry);
+		mutex_unlock(&tiq_list_lock);
+		synchronize_rcu();
+	}
 }
 
 static inline u32 clear_shared_ind(void)
@@ -137,40 +102,6 @@ static inline u32 clear_shared_ind(void)
 	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 }
 
-static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
-{
-	struct qdio_q *q;
-	int i;
-
-	for_each_input_queue(irq, q, i) {
-		if (!references_shared_dsci(irq) &&
-		    has_multiple_inq_on_dsci(irq))
-			xchg(q->irq_ptr->dsci, 0);
-
-		if (q->u.in.queue_start_poll) {
-			/* skip if polling is enabled or already in work */
-			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
-					     &q->u.in.queue_irq_state)) {
-				qperf_inc(q, int_discarded);
-				continue;
-			}
-
-			/* avoid dsci clear here, done after processing */
-			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
-						 q->irq_ptr->int_parm);
-		} else {
-			if (!shared_ind(q->irq_ptr))
-				xchg(q->irq_ptr->dsci, 0);
-
-			/*
-			 * Call inbound processing but not directly
-			 * since that could starve other thinint queues.
-			 */
-			tasklet_schedule(&q->tasklet);
-		}
-	}
-}
-
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
  * @alsi: pointer to adapter local summary indicator
@@ -182,25 +113,42 @@ static void tiqdio_thinint_handler(void *alsi, void *data)
 	struct qdio_q *q;
 
 	last_ai_time = S390_lowcore.int_clock;
-	inc_irq_stat(IRQIO_QAI);
+	kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
 
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
 	/* check for work on all inbound thinint queues */
 	list_for_each_entry_rcu(q, &tiq_list, entry) {
-		struct qdio_irq *irq;
 
 		/* only process queues from changed sets */
-		irq = q->irq_ptr;
-		if (unlikely(references_shared_dsci(irq))) {
+		if (unlikely(shared_ind(q->irq_ptr->dsci))) {
 			if (!si_used)
 				continue;
-		} else if (!*irq->dsci)
+		} else if (!*q->irq_ptr->dsci)
 			continue;
 
-		tiqdio_call_inq_handlers(irq);
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+					     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
 
+			/* avoid dsci clear here, done after processing */
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
+			/* only clear it if the indicator is non-shared */
+			if (!shared_ind(q->irq_ptr->dsci))
+				xchg(q->irq_ptr->dsci, 0);
+			/*
+			 * Call inbound processing but not directly
+			 * since that could starve other thinint queues.
+			 */
+			tasklet_schedule(&q->tasklet);
+		}
 		qperf_inc(q, adapter_int);
 	}
 	rcu_read_unlock();
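A toy model of the dispatch decision in the thin-interrupt handler above: a queue with a private device state change indicator (DSCI) is processed only when that indicator is set, while queues parked on the shared indicator are processed only when the shared summary fired. All names here are illustrative, not the qdio API.

#include <stdio.h>

struct toy_queue {
	unsigned int *dsci;	/* points at a private or the shared word */
	const char *name;
};

static unsigned int shared_ind_word;	/* the one shared indicator */

static int uses_shared_ind(struct toy_queue *q)
{
	return q->dsci == &shared_ind_word;
}

static void handle_interrupt(struct toy_queue *qs, int n, int si_used)
{
	int i;

	for (i = 0; i < n; i++) {
		struct toy_queue *q = &qs[i];

		if (uses_shared_ind(q)) {
			if (!si_used)
				continue;	/* shared summary not set */
		} else if (!*q->dsci)
			continue;		/* private DSCI not set */

		printf("process %s\n", q->name);
	}
}

int main(void)
{
	unsigned int priv = 1;
	struct toy_queue qs[] = {
		{ &priv, "private-q" },
		{ &shared_ind_word, "shared-q" },
	};

	handle_interrupt(qs, 2, 0);	/* only private-q is processed */
	return 0;
}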
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
deleted file mode 100644
index bcf20f3aa51..00000000000
--- a/drivers/s390/cio/scm.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * Recognize and maintain s390 storage class memory.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/device.h>
9#include <linux/module.h>
10#include <linux/mutex.h>
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/err.h>
14#include <asm/eadm.h>
15#include "chsc.h"
16
17static struct device *scm_root;
18static struct eadm_ops *eadm_ops;
19static DEFINE_MUTEX(eadm_ops_mutex);
20
21#define to_scm_dev(n) container_of(n, struct scm_device, dev)
22#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
23
24static int scmdev_probe(struct device *dev)
25{
26 struct scm_device *scmdev = to_scm_dev(dev);
27 struct scm_driver *scmdrv = to_scm_drv(dev->driver);
28
29 return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
30}
31
32static int scmdev_remove(struct device *dev)
33{
34 struct scm_device *scmdev = to_scm_dev(dev);
35 struct scm_driver *scmdrv = to_scm_drv(dev->driver);
36
37 return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
38}
39
40static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
41{
42 return add_uevent_var(env, "MODALIAS=scm:scmdev");
43}
44
45static struct bus_type scm_bus_type = {
46 .name = "scm",
47 .probe = scmdev_probe,
48 .remove = scmdev_remove,
49 .uevent = scmdev_uevent,
50};
51
52/**
53 * scm_driver_register() - register a scm driver
54 * @scmdrv: driver to be registered
55 */
56int scm_driver_register(struct scm_driver *scmdrv)
57{
58 struct device_driver *drv = &scmdrv->drv;
59
60 drv->bus = &scm_bus_type;
61
62 return driver_register(drv);
63}
64EXPORT_SYMBOL_GPL(scm_driver_register);
65
66/**
67 * scm_driver_unregister() - deregister a scm driver
68 * @scmdrv: driver to be deregistered
69 */
70void scm_driver_unregister(struct scm_driver *scmdrv)
71{
72 driver_unregister(&scmdrv->drv);
73}
74EXPORT_SYMBOL_GPL(scm_driver_unregister);
75
76int scm_get_ref(void)
77{
78 int ret = 0;
79
80 mutex_lock(&eadm_ops_mutex);
81 if (!eadm_ops || !try_module_get(eadm_ops->owner))
82 ret = -ENOENT;
83 mutex_unlock(&eadm_ops_mutex);
84
85 return ret;
86}
87EXPORT_SYMBOL_GPL(scm_get_ref);
88
89void scm_put_ref(void)
90{
91 mutex_lock(&eadm_ops_mutex);
92 module_put(eadm_ops->owner);
93 mutex_unlock(&eadm_ops_mutex);
94}
95EXPORT_SYMBOL_GPL(scm_put_ref);
96
97void register_eadm_ops(struct eadm_ops *ops)
98{
99 mutex_lock(&eadm_ops_mutex);
100 eadm_ops = ops;
101 mutex_unlock(&eadm_ops_mutex);
102}
103EXPORT_SYMBOL_GPL(register_eadm_ops);
104
105void unregister_eadm_ops(struct eadm_ops *ops)
106{
107 mutex_lock(&eadm_ops_mutex);
108 eadm_ops = NULL;
109 mutex_unlock(&eadm_ops_mutex);
110}
111EXPORT_SYMBOL_GPL(unregister_eadm_ops);
112
113int scm_start_aob(struct aob *aob)
114{
115 return eadm_ops->eadm_start(aob);
116}
117EXPORT_SYMBOL_GPL(scm_start_aob);
118
119void scm_irq_handler(struct aob *aob, int error)
120{
121 struct aob_rq_header *aobrq = (void *) aob->request.data;
122 struct scm_device *scmdev = aobrq->scmdev;
123 struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
124
125 scmdrv->handler(scmdev, aobrq->data, error);
126}
127EXPORT_SYMBOL_GPL(scm_irq_handler);
128
129#define scm_attr(name) \
130static ssize_t show_##name(struct device *dev, \
131 struct device_attribute *attr, char *buf) \
132{ \
133 struct scm_device *scmdev = to_scm_dev(dev); \
134 int ret; \
135 \
136 device_lock(dev); \
137 ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
138 device_unlock(dev); \
139 \
140 return ret; \
141} \
142static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
143
144scm_attr(persistence);
145scm_attr(oper_state);
146scm_attr(data_state);
147scm_attr(rank);
148scm_attr(release);
149scm_attr(res_id);
150
151static struct attribute *scmdev_attrs[] = {
152 &dev_attr_persistence.attr,
153 &dev_attr_oper_state.attr,
154 &dev_attr_data_state.attr,
155 &dev_attr_rank.attr,
156 &dev_attr_release.attr,
157 &dev_attr_res_id.attr,
158 NULL,
159};
160
161static struct attribute_group scmdev_attr_group = {
162 .attrs = scmdev_attrs,
163};
164
165static const struct attribute_group *scmdev_attr_groups[] = {
166 &scmdev_attr_group,
167 NULL,
168};
169
170static void scmdev_release(struct device *dev)
171{
172 struct scm_device *scmdev = to_scm_dev(dev);
173
174 kfree(scmdev);
175}
176
177static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
178 unsigned int size, unsigned int max_blk_count)
179{
180 dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
181 scmdev->nr_max_block = max_blk_count;
182 scmdev->address = sale->sa;
183 scmdev->size = 1UL << size;
184 scmdev->attrs.rank = sale->rank;
185 scmdev->attrs.persistence = sale->p;
186 scmdev->attrs.oper_state = sale->op_state;
187 scmdev->attrs.data_state = sale->data_state;
188 scmdev->attrs.rank = sale->rank;
	scmdev->attrs.release = sale->r;
	scmdev->attrs.res_id = sale->rid;
	scmdev->dev.parent = scm_root;
	scmdev->dev.bus = &scm_bus_type;
	scmdev->dev.release = scmdev_release;
	scmdev->dev.groups = scmdev_attr_groups;
}

/*
 * Check for state-changes, notify the driver and userspace.
 */
static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
{
	struct scm_driver *scmdrv;
	bool changed;

	device_lock(&scmdev->dev);
	changed = scmdev->attrs.rank != sale->rank ||
		  scmdev->attrs.oper_state != sale->op_state;
	scmdev->attrs.rank = sale->rank;
	scmdev->attrs.oper_state = sale->op_state;
	if (!scmdev->dev.driver)
		goto out;
	scmdrv = to_scm_drv(scmdev->dev.driver);
	if (changed && scmdrv->notify)
		scmdrv->notify(scmdev);
out:
	device_unlock(&scmdev->dev);
	if (changed)
		kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
}

static int check_address(struct device *dev, void *data)
{
	struct scm_device *scmdev = to_scm_dev(dev);
	struct sale *sale = data;

	return scmdev->address == sale->sa;
}

static struct scm_device *scmdev_find(struct sale *sale)
{
	struct device *dev;

	dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);

	return dev ? to_scm_dev(dev) : NULL;
}

static int scm_add(struct chsc_scm_info *scm_info, size_t num)
{
	struct sale *sale, *scmal = scm_info->scmal;
	struct scm_device *scmdev;
	int ret;

	for (sale = scmal; sale < scmal + num; sale++) {
		scmdev = scmdev_find(sale);
		if (scmdev) {
			scmdev_update(scmdev, sale);
			/* Release reference from scmdev_find(). */
			put_device(&scmdev->dev);
			continue;
		}
		scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
		if (!scmdev)
			return -ENODEV;
		scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
		ret = device_register(&scmdev->dev);
		if (ret) {
			/* Release reference from device_initialize(). */
			put_device(&scmdev->dev);
			return ret;
		}
	}

	return 0;
}

int scm_update_information(void)
{
	struct chsc_scm_info *scm_info;
	u64 token = 0;
	size_t num;
	int ret;

	scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!scm_info)
		return -ENOMEM;

	do {
		ret = chsc_scm_info(scm_info, token);
		if (ret)
			break;

		num = (scm_info->response.length -
		       (offsetof(struct chsc_scm_info, scmal) -
			offsetof(struct chsc_scm_info, response))
		      ) / sizeof(struct sale);

		ret = scm_add(scm_info, num);
		if (ret)
			break;

		token = scm_info->restok;
	} while (token);

	free_page((unsigned long)scm_info);

	return ret;
}

static int __init scm_init(void)
{
	int ret;

	ret = bus_register(&scm_bus_type);
	if (ret)
		return ret;

	scm_root = root_device_register("scm");
	if (IS_ERR(scm_root)) {
		bus_unregister(&scm_bus_type);
		return PTR_ERR(scm_root);
	}

	scm_update_information();
	return 0;
}
subsys_initcall_sync(scm_init);
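
For illustration, a minimal consumer of this bus: a driver (such as the scm_blk block driver) supplies a struct scm_driver whose handler() is what scm_irq_handler() above dispatches to, and whose notify() is invoked from scmdev_update() on state changes. This is a sketch assuming the callback set used above (probe, remove, notify, handler) as declared in <asm/eadm.h>; all foo_* names are hypothetical.

#include <linux/module.h>
#include <asm/eadm.h>

static int foo_probe(struct scm_device *scmdev)
{
	/* Set up per-device state; scmdev_setup() above has already
	 * filled in scmdev->address, ->size and ->nr_max_block. */
	return 0;
}

static int foo_remove(struct scm_device *scmdev)
{
	return 0;
}

static void foo_notify(struct scm_device *scmdev)
{
	/* Called from scmdev_update() when rank or oper_state changed. */
}

static void foo_handler(struct scm_device *scmdev, void *data, int error)
{
	/* Called from scm_irq_handler() when an AOB completes; "data" is
	 * the cookie the driver stored in its aob_rq_header. */
}

static struct scm_driver foo_driver = {
	.drv = {
		.name = "foo",
		.owner = THIS_MODULE,
	},
	.probe = foo_probe,
	.remove = foo_remove,
	.notify = foo_notify,
	.handler = foo_handler,
};

static int __init foo_init(void)
{
	return scm_driver_register(&foo_driver);
}

static void __exit foo_exit(void)
{
	scm_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);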
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 771faf7094d..f0a12d2eb78 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -2,7 +2,16 @@
 # S/390 crypto devices
 #
 
+ifdef CONFIG_ZCRYPT_MONOLITHIC
+
+z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \
+		 zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o
+obj-$(CONFIG_ZCRYPT) += z90crypt.o
+
+else
+
 ap-objs := ap_bus.o
 obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o zcrypt_cex4.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o
+
+endif
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b8b340ac533..b77ae519d79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,5 +1,7 @@
 /*
- * Copyright IBM Corp. 2006, 2012
+ * linux/drivers/s390/crypto/ap_bus.c
+ *
+ * Copyright (C) 2006 IBM Corporation
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -40,10 +42,10 @@
 #include <asm/reset.h>
 #include <asm/airq.h>
 #include <linux/atomic.h>
+#include <asm/system.h>
 #include <asm/isc.h>
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
-#include <asm/facility.h>
 
 #include "ap_bus.h"
 
@@ -62,14 +64,13 @@ static void ap_interrupt_handler(void *unused1, void *unused2);
 static void ap_reset(struct ap_device *ap_dev);
 static void ap_config_timeout(unsigned long ptr);
 static int ap_select_domain(void);
-static void ap_query_configuration(void);
 
 /*
  * Module description.
  */
 MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
-		   "Copyright IBM Corp. 2006, 2012");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
+		   "Copyright 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
 
 /*
@@ -85,7 +86,6 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000);
 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
 
 static struct device *ap_root_device = NULL;
-static struct ap_config_info *ap_configuration;
 static DEFINE_SPINLOCK(ap_device_list_lock);
 static LIST_HEAD(ap_device_list);
 
@@ -160,19 +160,6 @@ static int ap_interrupts_available(void)
 }
 
 /**
- * ap_configuration_available(): Test if AP configuration
- * information is available.
- *
- * Returns 1 if AP configuration information is available.
- */
-#ifdef CONFIG_64BIT
-static int ap_configuration_available(void)
-{
-	return test_facility(2) && test_facility(12);
-}
-#endif
-
-/**
  * ap_test_queue(): Test adjunct processor queue.
  * @qid: The AP queue number
  * @queue_depth: Pointer to queue depth value
@@ -228,7 +215,7 @@ ap_queue_interruption_control(ap_qid_t qid, void *ind)
 	register struct ap_queue_status reg1_out asm ("1");
 	register void *reg2 asm ("2") = ind;
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(AQIC) */
+		".long 0xb2af0000"		/* PQAP(RAPQ) */
 		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
 		:
 		: "cc" );
@@ -245,7 +232,7 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
 	register unsigned long reg2 asm ("2");
 
 	asm volatile(
-		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
+		".long 0xb2af0000\n"
		"0:\n"
 		EX_TABLE(0b, 0b)
 		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
@@ -257,26 +244,6 @@ __ap_query_functions(ap_qid_t qid, unsigned int *functions)
 }
 #endif
 
-#ifdef CONFIG_64BIT
-static inline int __ap_query_configuration(struct ap_config_info *config)
-{
-	register unsigned long reg0 asm ("0") = 0x04000000UL;
-	register unsigned long reg1 asm ("1") = -EINVAL;
-	register unsigned char *reg2 asm ("2") = (unsigned char *)config;
-
-	asm volatile(
-		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"0: la    %1,0\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
-		:
-		: "cc");
-
-	return reg1;
-}
-#endif
-
 /**
  * ap_query_functions(): Query supported functions.
  * @qid: The AP queue number
@@ -327,6 +294,25 @@ static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
 }
 
 /**
+ * ap_4096_commands_available(): Check for availability of 4096 bit RSA
+ * support.
+ * @qid: The AP queue number
+ *
+ * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
+ */
+int ap_4096_commands_available(ap_qid_t qid)
+{
+	unsigned int functions;
+
+	if (ap_query_functions(qid, &functions))
+		return 0;
+
+	return test_ap_facility(functions, 1) &&
+	       test_ap_facility(functions, 2);
+}
+EXPORT_SYMBOL(ap_4096_commands_available);
+
+/**
  * ap_queue_enable_interruption(): Enable interruption on an AP.
  * @qid: The AP queue number
  * @ind: the notification indicator byte
@@ -352,12 +338,6 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
 			break;
 		case AP_RESPONSE_RESET_IN_PROGRESS:
 		case AP_RESPONSE_BUSY:
-			if (i < AP_MAX_RESET - 1) {
-				udelay(5);
-				status = ap_queue_interruption_control(qid,
-								       ind);
-				continue;
-			}
 			break;
 		case AP_RESPONSE_Q_NOT_AVAIL:
 		case AP_RESPONSE_DECONFIGURED:
@@ -411,7 +391,7 @@ __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
 		reg0 |= 0x400000UL;
 
 	asm volatile (
-		"0: .long 0xb2ad0042\n"		/* NQAP */
+		"0: .long 0xb2ad0042\n"		/* DQAP */
 		"   brc   2,0b"
 		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
 		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
@@ -470,7 +450,7 @@ __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
 
 
 	asm volatile(
-		"0: .long 0xb2ae0064\n"		/* DQAP */
+		"0: .long 0xb2ae0064\n"
 		"   brc   6,0b\n"
 		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
 		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
@@ -673,34 +653,6 @@ static ssize_t ap_request_count_show(struct device *dev,
 
 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
 
-static ssize_t ap_requestq_count_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc;
-
-	spin_lock_bh(&ap_dev->lock);
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
-
-static ssize_t ap_pendingq_count_show(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	int rc;
-
-	spin_lock_bh(&ap_dev->lock);
-	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
-	spin_unlock_bh(&ap_dev->lock);
-	return rc;
-}
-
-static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
-
 static ssize_t ap_modalias_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -709,23 +661,11 @@ static ssize_t ap_modalias_show(struct device *dev,
 
 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
 
-static ssize_t ap_functions_show(struct device *dev,
-				 struct device_attribute *attr, char *buf)
-{
-	struct ap_device *ap_dev = to_ap_dev(dev);
-	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
-}
-
-static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
-
 static struct attribute *ap_dev_attrs[] = {
 	&dev_attr_hwtype.attr,
 	&dev_attr_depth.attr,
 	&dev_attr_request_count.attr,
-	&dev_attr_requestq_count.attr,
-	&dev_attr_pendingq_count.attr,
 	&dev_attr_modalias.attr,
-	&dev_attr_ap_functions.attr,
 	NULL
 };
 static struct attribute_group ap_dev_attr_group = {
@@ -828,7 +768,6 @@ static int ap_bus_resume(struct device *dev)
 		ap_suspend_flag = 0;
 		if (!ap_interrupts_available())
 			ap_interrupt_indicator = NULL;
-		ap_query_configuration();
 		if (!user_set_domain) {
 			ap_domain_index = -1;
 			ap_select_domain();
@@ -897,12 +836,12 @@ static void __ap_flush_queue(struct ap_device *ap_dev)
 	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
 		list_del_init(&ap_msg->list);
 		ap_dev->pendingq_count--;
-		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 	}
 	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
 		list_del_init(&ap_msg->list);
 		ap_dev->requestq_count--;
-		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 	}
 }
 
@@ -952,20 +891,6 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
 }
 EXPORT_SYMBOL(ap_driver_unregister);
 
-void ap_bus_force_rescan(void)
-{
-	/* Delete the AP bus rescan timer. */
-	del_timer(&ap_config_timer);
-
-	/* processing a synchronous bus rescan */
-	ap_scan_bus(NULL);
-
-	/* Setup the AP bus rescan timer again. */
-	ap_config_timer.expires = jiffies + ap_config_time * HZ;
-	add_timer(&ap_config_timer);
-}
-EXPORT_SYMBOL(ap_bus_force_rescan);
-
 /*
  * AP bus attributes.
 */
@@ -1068,65 +993,6 @@ static struct bus_attribute *const ap_bus_attrs[] = {
 	NULL,
 };
 
-static inline int ap_test_config(unsigned int *field, unsigned int nr)
-{
-	if (nr > 0xFFu)
-		return 0;
-	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
-}
-
-/*
- * ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
- *
- * Returns 0 if the card is not configured
- *	   1 if the card is configured or
- *	     if the configuration information is not available
- */
-static inline int ap_test_config_card_id(unsigned int id)
-{
-	if (!ap_configuration)
-		return 1;
-	return ap_test_config(ap_configuration->apm, id);
-}
-
-/*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
- * @domain AP usage domain ID
- *
- * Returns 0 if the usage domain is not configured
- *	   1 if the usage domain is configured or
- *	     if the configuration information is not available
- */
-static inline int ap_test_config_domain(unsigned int domain)
-{
-	if (!ap_configuration)
-		return 1;
-	return ap_test_config(ap_configuration->aqm, domain);
-}
-
-/**
- * ap_query_configuration(): Query AP configuration information.
- *
- * Query information of installed cards and configured domains from AP.
- */
-static void ap_query_configuration(void)
-{
-#ifdef CONFIG_64BIT
-	if (ap_configuration_available()) {
-		if (!ap_configuration)
-			ap_configuration =
-				kzalloc(sizeof(struct ap_config_info),
-					GFP_KERNEL);
-		if (ap_configuration)
-			__ap_query_configuration(ap_configuration);
-	} else
-		ap_configuration = NULL;
-#else
-	ap_configuration = NULL;
-#endif
-}
-
 /**
  * ap_select_domain(): Select an AP domain.
  *
@@ -1135,7 +1001,6 @@ static void ap_query_configuration(void)
 static int ap_select_domain(void)
 {
 	int queue_depth, device_type, count, max_count, best_domain;
-	ap_qid_t qid;
 	int rc, i, j;
 
 	/*
@@ -1149,13 +1014,9 @@ static int ap_select_domain(void)
 	best_domain = -1;
 	max_count = 0;
 	for (i = 0; i < AP_DOMAINS; i++) {
-		if (!ap_test_config_domain(i))
-			continue;
 		count = 0;
 		for (j = 0; j < AP_DEVICES; j++) {
-			if (!ap_test_config_card_id(j))
-				continue;
-			qid = AP_MKQID(j, i);
+			ap_qid_t qid = AP_MKQID(j, i);
 			rc = ap_query_queue(qid, &queue_depth, &device_type);
 			if (rc)
 				continue;
@@ -1272,7 +1133,7 @@ out:
 
 static void ap_interrupt_handler(void *unused1, void *unused2)
 {
-	inc_irq_stat(IRQIO_APB);
+	kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++;
 	tasklet_schedule(&ap_tasklet);
 }
 
@@ -1304,7 +1165,6 @@ static void ap_scan_bus(struct work_struct *unused)
 	unsigned int device_functions;
 	int rc, i;
 
-	ap_query_configuration();
 	if (ap_select_domain() != 0)
 		return;
 	for (i = 0; i < AP_DEVICES; i++) {
@@ -1312,10 +1172,7 @@ static void ap_scan_bus(struct work_struct *unused)
 		dev = bus_find_device(&ap_bus_type, NULL,
 				      (void *)(unsigned long)qid,
 				      __ap_scan_bus);
-		if (ap_test_config_card_id(i))
-			rc = ap_query_queue(qid, &queue_depth, &device_type);
-		else
-			rc = -ENODEV;
+		rc = ap_query_queue(qid, &queue_depth, &device_type);
 		if (dev) {
 			if (rc == -EBUSY) {
 				set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1356,22 +1213,29 @@ static void ap_scan_bus(struct work_struct *unused)
 			     (unsigned long) ap_dev);
 		switch (device_type) {
 		case 0:
-			/* device type probing for old cards */
 			if (ap_probe_device_type(ap_dev)) {
 				kfree(ap_dev);
 				continue;
 			}
 			break;
+		case 10:
+			if (ap_query_functions(qid, &device_functions)) {
+				kfree(ap_dev);
+				continue;
+			}
+			if (test_ap_facility(device_functions, 3))
+				ap_dev->device_type = AP_DEVICE_TYPE_CEX3C;
+			else if (test_ap_facility(device_functions, 4))
+				ap_dev->device_type = AP_DEVICE_TYPE_CEX3A;
+			else {
+				kfree(ap_dev);
+				continue;
+			}
+			break;
 		default:
 			ap_dev->device_type = device_type;
 		}
 
-		rc = ap_query_functions(qid, &device_functions);
-		if (!rc)
-			ap_dev->functions = device_functions;
-		else
-			ap_dev->functions = 0u;
-
 		ap_dev->device.bus = &ap_bus_type;
 		ap_dev->device.parent = ap_root_device;
 		if (dev_set_name(&ap_dev->device, "card%02x",
@@ -1407,16 +1271,18 @@ ap_config_timeout(unsigned long ptr)
 }
 
 /**
- * __ap_schedule_poll_timer(): Schedule poll timer.
+ * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
-static inline void __ap_schedule_poll_timer(void)
+static inline void ap_schedule_poll_timer(void)
 {
 	ktime_t hr_time;
 
 	spin_lock_bh(&ap_poll_timer_lock);
-	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
+	if (ap_using_interrupts() || ap_suspend_flag)
+		goto out;
+	if (hrtimer_is_queued(&ap_poll_timer))
 		goto out;
 	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
 		hr_time = ktime_set(0, poll_timeout);
@@ -1428,18 +1294,6 @@ out:
 }
 
 /**
- * ap_schedule_poll_timer(): Schedule poll timer.
- *
- * Set up the timer to run the poll tasklet
- */
-static inline void ap_schedule_poll_timer(void)
-{
-	if (ap_using_interrupts())
-		return;
-	__ap_schedule_poll_timer();
-}
-
-/**
  * ap_poll_read(): Receive pending reply messages from an AP device.
  * @ap_dev: pointer to the AP device
  * @flags: pointer to control flags, bit 2^0 is set if another poll is
@@ -1465,7 +1319,7 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
 			continue;
 		list_del_init(&ap_msg->list);
 		ap_dev->pendingq_count--;
-		ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
+		ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
 		break;
 	}
 	if (ap_dev->queue_count > 0)
@@ -1520,9 +1374,8 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
 		*flags |= 1;
 		*flags |= 2;
 		break;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		__ap_schedule_poll_timer();
 	case AP_RESPONSE_Q_FULL:
+	case AP_RESPONSE_RESET_IN_PROGRESS:
 		*flags |= 2;
 		break;
 	case AP_RESPONSE_MESSAGE_TOO_BIG:
@@ -1586,10 +1439,10 @@ static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_ms
 			return -EBUSY;
 		case AP_RESPONSE_REQ_FAC_NOT_INST:
 		case AP_RESPONSE_MESSAGE_TOO_BIG:
-			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
+			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
 			return -EINVAL;
 		default:	/* Device is gone. */
-			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 			return -ENODEV;
 		}
 	} else {
@@ -1607,10 +1460,6 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
 	unsigned long flags;
 	int rc;
 
-	/* For asynchronous message handling a valid receive-callback
-	 * is required. */
-	BUG_ON(!ap_msg->receive);
-
 	spin_lock_bh(&ap_dev->lock);
 	if (!ap_dev->unregistered) {
 		/* Make room on the queue by polling for finished requests. */
@@ -1622,7 +1471,7 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
 		if (rc == -ENODEV)
 			ap_dev->unregistered = 1;
 	} else {
-		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
+		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
 		rc = -ENODEV;
 	}
 	spin_unlock_bh(&ap_dev->lock);
@@ -1692,8 +1541,6 @@ static void ap_reset(struct ap_device *ap_dev)
 	rc = ap_init_queue(ap_dev->qid);
 	if (rc == -ENODEV)
 		ap_dev->unregistered = 1;
-	else
-		__ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
@@ -1917,7 +1764,6 @@ int __init ap_module_init(void)
 		goto out_root;
 	}
 
-	ap_query_configuration();
 	if (ap_select_domain() == 0)
 		ap_scan_bus(NULL);
 
@@ -2003,5 +1849,7 @@ void ap_module_exit(void)
 	}
 }
 
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(ap_module_init);
 module_exit(ap_module_exit);
+#endif
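
For illustration: the right-hand side of this diff keeps the receive callback in struct ap_driver rather than in each struct ap_message (see the ap_bus.h hunk below), so a driver supplies it once at registration time. A minimal sketch under that scheme; the example_* names are hypothetical, and registration would go through ap_driver_register() just as the CEX2A driver further down does.

#include <linux/err.h>
#include "ap_bus.h"

static int example_probe(struct ap_device *ap_dev)
{
	return 0;
}

static void example_remove(struct ap_device *ap_dev)
{
}

static void example_receive(struct ap_device *ap_dev,
			    struct ap_message *msg,
			    struct ap_message *reply)
{
	/* Runs in tasklet context. On queue flush or device loss the
	 * bus passes reply == ERR_PTR(-ENODEV), so test IS_ERR(reply)
	 * before touching reply->message. */
}

static struct ap_device_id example_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },
	{ /* end of list */ },
};

static struct ap_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.receive = example_receive,	/* one callback per driver */
	.ids = example_ids,
	.request_timeout = 30 * HZ,
};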
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 685f6cc022f..d960a6309ee 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,5 +1,7 @@
 /*
- * Copyright IBM Corp. 2006, 2012
+ * linux/drivers/s390/crypto/ap_bus.h
+ *
+ * Copyright (C) 2006 IBM Corporation
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
  *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -83,12 +85,13 @@ int ap_queue_status_invalid_test(struct ap_queue_status *status)
 	return !(memcmp(status, &invalid, sizeof(struct ap_queue_status)));
 }
 
-#define AP_MAX_BITS 31
-static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
+#define MAX_AP_FACILITY 31
+
+static inline int test_ap_facility(unsigned int function, unsigned int nr)
 {
-	if (nr > AP_MAX_BITS)
+	if (nr > MAX_AP_FACILITY)
 		return 0;
-	return (*ptr & (0x80000000u >> nr)) != 0;
+	return function & (unsigned int)(0x80000000 >> nr);
 }
 
 #define AP_RESPONSE_NORMAL		0x00
@@ -116,15 +119,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
 #define AP_DEVICE_TYPE_CEX2C	7
 #define AP_DEVICE_TYPE_CEX3A	8
 #define AP_DEVICE_TYPE_CEX3C	9
-#define AP_DEVICE_TYPE_CEX4	10
-
-/*
- * Known function facilities
- */
-#define AP_FUNC_MEX4K 1
-#define AP_FUNC_CRT4K 2
-#define AP_FUNC_COPRO 3
-#define AP_FUNC_ACCEL 4
 
 /*
  * AP reset flag states
@@ -142,6 +136,9 @@ struct ap_driver {
 
 	int (*probe)(struct ap_device *);
 	void (*remove)(struct ap_device *);
+	/* receive is called from tasklet context */
+	void (*receive)(struct ap_device *, struct ap_message *,
+			struct ap_message *);
 	int request_timeout;	/* request timeout in jiffies */
 };
 
@@ -159,7 +156,6 @@ struct ap_device {
 	ap_qid_t qid;			/* AP queue id. */
 	int queue_depth;		/* AP queue depth.*/
 	int device_type;		/* AP device type. */
-	unsigned int functions;		/* AP device function bitfield. */
 	int unregistered;		/* marks AP device as unregistered */
 	struct timer_list timeout;	/* Timer for request timeouts. */
 	int reset;			/* Reset required after req. timeout. */
@@ -187,22 +183,8 @@ struct ap_message {
 
 	void *private;			/* ap driver private pointer. */
 	unsigned int special:1;		/* Used for special commands. */
-	/* receive is called from tasklet context */
-	void (*receive)(struct ap_device *, struct ap_message *,
-			struct ap_message *);
 };
 
-struct ap_config_info {
-	unsigned int special_command:1;
-	unsigned int ap_extended:1;
-	unsigned char reserved1:6;
-	unsigned char reserved2[15];
-	unsigned int apm[8];		/* AP ID mask */
-	unsigned int aqm[8];		/* AP queue mask */
-	unsigned int adm[8];		/* AP domain mask */
-	unsigned char reserved4[16];
-} __packed;
-
 #define AP_DEVICE(dt)					\
 	.dev_type=(dt),					\
 	.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
@@ -217,7 +199,6 @@ static inline void ap_init_message(struct ap_message *ap_msg)
 	ap_msg->psmid = 0;
 	ap_msg->length = 0;
 	ap_msg->special = 0;
-	ap_msg->receive = NULL;
 }
 
 /*
@@ -231,9 +212,10 @@ int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
 void ap_flush_queue(struct ap_device *ap_dev);
-void ap_bus_force_rescan(void);
 
 int ap_module_init(void);
 void ap_module_exit(void);
 
+int ap_4096_commands_available(ap_qid_t qid);
+
 #endif /* _AP_BUS_H_ */
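
Note the bit numbering used by test_ap_facility() above: bit 0 is the most significant bit of the 32-bit function mask, so facility bit n corresponds to 0x80000000 >> n. A standalone sketch of how ap_4096_commands_available() from the ap_bus.c hunk combines facility bits 1 and 2; the mask value here is illustrative only.

#include <stdio.h>

/* Same logic as test_ap_facility() in the hunk above: bit 0 is the MSB. */
static int test_ap_facility(unsigned int function, unsigned int nr)
{
	if (nr > 31)
		return 0;
	return function & (unsigned int)(0x80000000 >> nr);
}

int main(void)
{
	/* Hypothetical function mask with bits 1 and 2 set (4096 bit
	 * modular exponentiation and CRT), the two bits tested by
	 * ap_4096_commands_available(). */
	unsigned int functions = 0x60000000;

	printf("4096 bit RSA available: %s\n",
	       (test_ap_facility(functions, 1) &&
		test_ap_facility(functions, 2)) ? "yes" : "no");
	return 0;
}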
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 31cfaa55607..88ad33ed5d3 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,7 +1,9 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_api.c
+ *
  *  zcrypt 2.1.0
  *
- *  Copyright IBM Corp. 2001, 2012
+ *  Copyright (C) 2001, 2006 IBM Corporation
  *  Author(s): Robert Burroughs
  *	       Eric Rossman (edrossma@us.ibm.com)
  *	       Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,7 +11,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
- *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -38,39 +39,25 @@
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
 #include <linux/hw_random.h>
-#include <linux/debugfs.h>
-#include <asm/debug.h>
 
-#include "zcrypt_debug.h"
 #include "zcrypt_api.h"
 
 /*
 * Module description.
 */
 MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
-		   "Copyright IBM Corp. 2001, 2012");
+MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
+		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
 
 static DEFINE_SPINLOCK(zcrypt_device_lock);
 static LIST_HEAD(zcrypt_device_list);
 static int zcrypt_device_count = 0;
 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
-static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
-
-atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
-EXPORT_SYMBOL(zcrypt_rescan_req);
 
 static int zcrypt_rng_device_add(void);
 static void zcrypt_rng_device_remove(void);
 
-static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
-static LIST_HEAD(zcrypt_ops_list);
-
-static debug_info_t *zcrypt_dbf_common;
-static debug_info_t *zcrypt_dbf_devices;
-static struct dentry *debugfs_root;
-
 /*
  * Device attributes common for all crypto devices.
  */
@@ -100,8 +87,6 @@ static ssize_t zcrypt_online_store(struct device *dev,
 	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
 		return -EINVAL;
 	zdev->online = online;
-	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
-		       zdev->online);
 	if (!online)
 		ap_flush_queue(zdev->ap_dev);
 	return count;
@@ -120,24 +105,6 @@ static struct attribute_group zcrypt_device_attr_group = {
 };
 
 /**
- * Process a rescan of the transport layer.
- *
- * Returns 1, if the rescan has been processed, otherwise 0.
- */
-static inline int zcrypt_process_rescan(void)
-{
-	if (atomic_read(&zcrypt_rescan_req)) {
-		atomic_set(&zcrypt_rescan_req, 0);
-		atomic_inc(&zcrypt_rescan_count);
-		ap_bus_force_rescan();
-		ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
-				  atomic_inc_return(&zcrypt_rescan_count));
-		return 1;
-	}
-	return 0;
-}
-
-/**
  * __zcrypt_increase_preference(): Increase preference of a crypto device.
  * @zdev: Pointer to the crypto device
  *
@@ -225,7 +192,6 @@ struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
 	zdev->reply.length = max_response_size;
 	spin_lock_init(&zdev->lock);
 	INIT_LIST_HEAD(&zdev->list);
-	zdev->dbf_area = zcrypt_dbf_devices;
 	return zdev;
 
 out_free:
@@ -251,8 +217,6 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
 {
 	int rc;
 
-	if (!zdev->ops)
-		return -ENODEV;
 	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
 				&zcrypt_device_attr_group);
 	if (rc)
@@ -261,8 +225,6 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
 	kref_init(&zdev->refcount);
 	spin_lock_bh(&zcrypt_device_lock);
 	zdev->online = 1;	/* New devices are online by default. */
-	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
-		       zdev->online);
 	list_add_tail(&zdev->list, &zcrypt_device_list);
 	__zcrypt_increase_preference(zdev);
 	zcrypt_device_count++;
@@ -309,67 +271,6 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
 }
 EXPORT_SYMBOL(zcrypt_device_unregister);
 
-void zcrypt_msgtype_register(struct zcrypt_ops *zops)
-{
-	if (zops->owner) {
-		spin_lock_bh(&zcrypt_ops_list_lock);
-		list_add_tail(&zops->list, &zcrypt_ops_list);
-		spin_unlock_bh(&zcrypt_ops_list_lock);
-	}
-}
-EXPORT_SYMBOL(zcrypt_msgtype_register);
-
-void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
-{
-	spin_lock_bh(&zcrypt_ops_list_lock);
-	list_del_init(&zops->list);
-	spin_unlock_bh(&zcrypt_ops_list_lock);
-}
-EXPORT_SYMBOL(zcrypt_msgtype_unregister);
-
-static inline
-struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
-{
-	struct zcrypt_ops *zops;
-	int found = 0;
-
-	spin_lock_bh(&zcrypt_ops_list_lock);
-	list_for_each_entry(zops, &zcrypt_ops_list, list) {
-		if ((zops->variant == variant) &&
-		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
-			found = 1;
-			break;
-		}
-	}
-	spin_unlock_bh(&zcrypt_ops_list_lock);
-
-	if (!found)
-		return NULL;
-	return zops;
-}
-
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
-{
-	struct zcrypt_ops *zops = NULL;
-
-	zops = __ops_lookup(name, variant);
-	if (!zops) {
-		request_module(name);
-		zops = __ops_lookup(name, variant);
-	}
-	if ((!zops) || (!try_module_get(zops->owner)))
-		return NULL;
-	return zops;
-}
-EXPORT_SYMBOL(zcrypt_msgtype_request);
-
-void zcrypt_msgtype_release(struct zcrypt_ops *zops)
-{
-	if (zops)
-		module_put(zops->owner);
-}
-EXPORT_SYMBOL(zcrypt_msgtype_release);
-
 /**
  * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
  *
@@ -741,11 +642,6 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		do {
 			rc = zcrypt_rsa_modexpo(&mex);
 		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_modexpo(&mex);
-			} while (rc == -EAGAIN);
 		if (rc)
 			return rc;
 		return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -758,11 +654,6 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		do {
 			rc = zcrypt_rsa_crt(&crt);
 		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_rsa_crt(&crt);
-			} while (rc == -EAGAIN);
 		if (rc)
 			return rc;
 		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -775,11 +666,6 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		do {
 			rc = zcrypt_send_cprb(&xcRB);
 		} while (rc == -EAGAIN);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			do {
-				rc = zcrypt_send_cprb(&xcRB);
-			} while (rc == -EAGAIN);
 		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
 			return -EFAULT;
 		return rc;
@@ -886,15 +772,10 @@ static long trans_modexpo32(struct file *filp, unsigned int cmd,
 	do {
 		rc = zcrypt_rsa_modexpo(&mex64);
 	} while (rc == -EAGAIN);
-	/* on failure: retry once again after a requested rescan */
-	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-		do {
-			rc = zcrypt_rsa_modexpo(&mex64);
-		} while (rc == -EAGAIN);
-	if (rc)
-		return rc;
-	return put_user(mex64.outputdatalength,
-			&umex32->outputdatalength);
+	if (!rc)
+		rc = put_user(mex64.outputdatalength,
+			      &umex32->outputdatalength);
+	return rc;
 }
 
 struct compat_ica_rsa_modexpo_crt {
@@ -931,15 +812,10 @@ static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
 	do {
 		rc = zcrypt_rsa_crt(&crt64);
 	} while (rc == -EAGAIN);
-	/* on failure: retry once again after a requested rescan */
-	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-		do {
-			rc = zcrypt_rsa_crt(&crt64);
-		} while (rc == -EAGAIN);
-	if (rc)
-		return rc;
-	return put_user(crt64.outputdatalength,
-			&ucrt32->outputdatalength);
+	if (!rc)
+		rc = put_user(crt64.outputdatalength,
+			      &ucrt32->outputdatalength);
+	return rc;
 }
 
 struct compat_ica_xcRB {
@@ -995,11 +871,6 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
 	do {
 		rc = zcrypt_send_cprb(&xcRB64);
 	} while (rc == -EAGAIN);
-	/* on failure: retry once again after a requested rescan */
-	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-		do {
-			rc = zcrypt_send_cprb(&xcRB64);
-		} while (rc == -EAGAIN);
 	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
 	xcRB32.reply_data_length = xcRB64.reply_data_length;
 	xcRB32.status = xcRB64.status;
@@ -1257,9 +1128,6 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
 	 */
 	if (zcrypt_rng_buffer_index == 0) {
 		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
-		/* on failure: retry once again after a requested rescan */
-		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
-			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
 		if (rc < 0)
 			return -EIO;
 		zcrypt_rng_buffer_index = rc / sizeof *data;
@@ -1312,30 +1180,6 @@ static void zcrypt_rng_device_remove(void)
 	mutex_unlock(&zcrypt_rng_mutex);
 }
 
-int __init zcrypt_debug_init(void)
-{
-	debugfs_root = debugfs_create_dir("zcrypt", NULL);
-
-	zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
-	debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
-	debug_set_level(zcrypt_dbf_common, DBF_ERR);
-
-	zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
-	debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
-	debug_set_level(zcrypt_dbf_devices, DBF_ERR);
-
-	return 0;
-}
-
-void zcrypt_debug_exit(void)
-{
-	debugfs_remove(debugfs_root);
-	if (zcrypt_dbf_common)
-		debug_unregister(zcrypt_dbf_common);
-	if (zcrypt_dbf_devices)
-		debug_unregister(zcrypt_dbf_devices);
-}
-
 /**
  * zcrypt_api_init(): Module initialization.
  *
@@ -1345,12 +1189,6 @@ int __init zcrypt_api_init(void)
 {
 	int rc;
 
-	rc = zcrypt_debug_init();
-	if (rc)
-		goto out;
-
-	atomic_set(&zcrypt_rescan_req, 0);
-
 	/* Register the request sprayer. */
 	rc = misc_register(&zcrypt_misc_device);
 	if (rc < 0)
@@ -1380,8 +1218,9 @@ void zcrypt_api_exit(void)
 {
 	remove_proc_entry("driver/z90crypt", NULL);
 	misc_deregister(&zcrypt_misc_device);
-	zcrypt_debug_exit();
 }
 
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_api_init);
 module_exit(zcrypt_api_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 89632919c99..9688f3985b0 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,7 +1,9 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_api.h
+ *
  *  zcrypt 2.1.0
  *
- *  Copyright IBM Corp. 2001, 2012
+ *  Copyright (C) 2001, 2006 IBM Corporation
  *  Author(s): Robert Burroughs
  *	       Eric Rossman (edrossma@us.ibm.com)
  *	       Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -9,7 +11,6 @@
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
- *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -29,10 +30,8 @@
 #ifndef _ZCRYPT_API_H_
 #define _ZCRYPT_API_H_
 
-#include <linux/atomic.h>
-#include <asm/debug.h>
-#include <asm/zcrypt.h>
 #include "ap_bus.h"
+#include <asm/zcrypt.h>
 
 /* deprecated status calls */
 #define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
@@ -90,9 +89,6 @@ struct zcrypt_ops {
 			   struct ica_rsa_modexpo_crt *);
 	long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
 	long (*rng)(struct zcrypt_device *, char *);
-	struct list_head list;		/* zcrypt ops list. */
-	struct module *owner;
-	int variant;
 };
 
 struct zcrypt_device {
@@ -114,23 +110,14 @@ struct zcrypt_device {
 
 	struct ap_message reply;	/* Per-device reply structure. */
 	int max_exp_bit_length;
-
-	debug_info_t *dbf_area;		/* debugging */
 };
 
-/* transport layer rescanning */
-extern atomic_t zcrypt_rescan_req;
-
 struct zcrypt_device *zcrypt_device_alloc(size_t);
 void zcrypt_device_free(struct zcrypt_device *);
 void zcrypt_device_get(struct zcrypt_device *);
 int zcrypt_device_put(struct zcrypt_device *);
 int zcrypt_device_register(struct zcrypt_device *);
 void zcrypt_device_unregister(struct zcrypt_device *);
-void zcrypt_msgtype_register(struct zcrypt_ops *);
-void zcrypt_msgtype_unregister(struct zcrypt_ops *);
-struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *, int);
-void zcrypt_msgtype_release(struct zcrypt_ops *);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index 1f42f103c76..ed82f2f59b1 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,7 +1,9 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_cca_key.h
+ *
  *  zcrypt 2.1.0
  *
- *  Copyright IBM Corp. 2001, 2006
+ *  Copyright (C) 2001, 2006 IBM Corporation
  *  Author(s): Robert Burroughs
  *	       Eric Rossman (edrossma@us.ibm.com)
  *
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 1e849d6e1df..da171b5f399 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,14 +1,15 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_cex2a.c
+ *
  *  zcrypt 2.1.0
  *
- *  Copyright IBM Corp. 2001, 2012
+ *  Copyright (C) 2001, 2006 IBM Corporation
  *  Author(s): Robert Burroughs
  *	       Eric Rossman (edrossma@us.ibm.com)
  *
  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
  *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
- *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -36,7 +37,6 @@
 #include "zcrypt_api.h"
 #include "zcrypt_error.h"
 #include "zcrypt_cex2a.h"
-#include "zcrypt_msgtype50.h"
 
 #define CEX2A_MIN_MOD_SIZE	  1	/*    8 bits	*/
 #define CEX2A_MAX_MOD_SIZE	256	/* 2048 bits	*/
@@ -63,23 +63,364 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
 	{ /* end of list */ },
 };
 
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
 MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
 MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " \
-		   "Copyright IBM Corp. 2001, 2012");
+MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
 MODULE_LICENSE("GPL");
+#endif
 
 static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
 static void zcrypt_cex2a_remove(struct ap_device *ap_dev);
+static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
 
 static struct ap_driver zcrypt_cex2a_driver = {
 	.probe = zcrypt_cex2a_probe,
 	.remove = zcrypt_cex2a_remove,
+	.receive = zcrypt_cex2a_receive,
 	.ids = zcrypt_cex2a_ids,
 	.request_timeout = CEX2A_CLEANUP_TIME,
 };
 
 /**
+ * Convert an ICAMEX message to a type50 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @zreq: crypto request pointer
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	unsigned char *mod, *exp, *inp;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type50_meb1_msg *meb1 = ap_msg->message;
+		memset(meb1, 0, sizeof(*meb1));
+		ap_msg->length = sizeof(*meb1);
+		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb1->header.msg_len = sizeof(*meb1);
+		meb1->keyblock_type = TYPE50_MEB1_FMT;
+		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+		inp = meb1->message + sizeof(meb1->message) - mod_len;
+	} else if (mod_len <= 256) {
+		struct type50_meb2_msg *meb2 = ap_msg->message;
+		memset(meb2, 0, sizeof(*meb2));
+		ap_msg->length = sizeof(*meb2);
+		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb2->header.msg_len = sizeof(*meb2);
+		meb2->keyblock_type = TYPE50_MEB2_FMT;
+		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	} else {
+		/* mod_len > 256 = 4096 bit RSA Key */
+		struct type50_meb3_msg *meb3 = ap_msg->message;
+		memset(meb3, 0, sizeof(*meb3));
+		ap_msg->length = sizeof(*meb3);
+		meb3->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb3->header.msg_len = sizeof(*meb3);
+		meb3->keyblock_type = TYPE50_MEB3_FMT;
+		mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
+		exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
+		inp = meb3->message + sizeof(meb3->message) - mod_len;
+	}
+
+	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+	    copy_from_user(exp, mex->b_key, mod_len) ||
+	    copy_from_user(inp, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
147 *
148 * @zdev: crypto device pointer
149 * @zreq: crypto request pointer
150 * @crt: pointer to user input data
151 *
152 * Returns 0 on success or -EFAULT.
153 */
154static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
155 struct ap_message *ap_msg,
156 struct ica_rsa_modexpo_crt *crt)
157{
158 int mod_len, short_len, long_len, long_offset, limit;
159 unsigned char *p, *q, *dp, *dq, *u, *inp;
160
161 mod_len = crt->inputdatalength;
162 short_len = mod_len / 2;
163 long_len = mod_len / 2 + 8;
164
165 /*
166 * CEX2A cannot handle p, dp, or U > 128 bytes.
167 * If we have one of these, we need to do extra checking.
168 * For CEX3A the limit is 256 bytes.
169 */
170 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)
171 limit = 256;
172 else
173 limit = 128;
174
175 if (long_len > limit) {
176 /*
177 * zcrypt_rsa_crt already checked for the leading
178 * zeroes of np_prime, bp_key and u_mult_inc.
179 */
180 long_offset = long_len - limit;
181 long_len = limit;
182 } else
183 long_offset = 0;
184
185 /*
186 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
187 * the larger message structure.
188 */
189 if (long_len <= 64) {
190 struct type50_crb1_msg *crb1 = ap_msg->message;
191 memset(crb1, 0, sizeof(*crb1));
192 ap_msg->length = sizeof(*crb1);
193 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
194 crb1->header.msg_len = sizeof(*crb1);
195 crb1->keyblock_type = TYPE50_CRB1_FMT;
196 p = crb1->p + sizeof(crb1->p) - long_len;
197 q = crb1->q + sizeof(crb1->q) - short_len;
198 dp = crb1->dp + sizeof(crb1->dp) - long_len;
199 dq = crb1->dq + sizeof(crb1->dq) - short_len;
200 u = crb1->u + sizeof(crb1->u) - long_len;
201 inp = crb1->message + sizeof(crb1->message) - mod_len;
202 } else if (long_len <= 128) {
203 struct type50_crb2_msg *crb2 = ap_msg->message;
204 memset(crb2, 0, sizeof(*crb2));
205 ap_msg->length = sizeof(*crb2);
206 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
207 crb2->header.msg_len = sizeof(*crb2);
208 crb2->keyblock_type = TYPE50_CRB2_FMT;
209 p = crb2->p + sizeof(crb2->p) - long_len;
210 q = crb2->q + sizeof(crb2->q) - short_len;
211 dp = crb2->dp + sizeof(crb2->dp) - long_len;
212 dq = crb2->dq + sizeof(crb2->dq) - short_len;
213 u = crb2->u + sizeof(crb2->u) - long_len;
214 inp = crb2->message + sizeof(crb2->message) - mod_len;
215 } else {
216		/* 128 < long_len <= 256 */
217 struct type50_crb3_msg *crb3 = ap_msg->message;
218 memset(crb3, 0, sizeof(*crb3));
219 ap_msg->length = sizeof(*crb3);
220 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
221 crb3->header.msg_len = sizeof(*crb3);
222 crb3->keyblock_type = TYPE50_CRB3_FMT;
223 p = crb3->p + sizeof(crb3->p) - long_len;
224 q = crb3->q + sizeof(crb3->q) - short_len;
225 dp = crb3->dp + sizeof(crb3->dp) - long_len;
226 dq = crb3->dq + sizeof(crb3->dq) - short_len;
227 u = crb3->u + sizeof(crb3->u) - long_len;
228 inp = crb3->message + sizeof(crb3->message) - mod_len;
229 }
230
231 if (copy_from_user(p, crt->np_prime + long_offset, long_len) ||
232 copy_from_user(q, crt->nq_prime, short_len) ||
233 copy_from_user(dp, crt->bp_key + long_offset, long_len) ||
234 copy_from_user(dq, crt->bq_key, short_len) ||
235 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) ||
236 copy_from_user(inp, crt->inputdata, mod_len))
237 return -EFAULT;
238
239 return 0;
240}
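
The CRT operand lengths follow a fixed pattern: q, dq (and the input) use short_len = mod_len / 2, while p, dp and u carry 8 extra bytes, trimmed back via long_offset when they exceed the card limit. A stand-alone sketch of that arithmetic (limits per the comment above; not driver code):

#include <stdio.h>

static void crt_lengths(int mod_len, int limit)
{
	int short_len = mod_len / 2;
	int long_len = mod_len / 2 + 8;
	int long_offset = 0;

	if (long_len > limit) {
		long_offset = long_len - limit;	/* leading zeroes skipped */
		long_len = limit;
	}
	printf("mod_len %3d: short %3d, long %3d, offset %d\n",
	       mod_len, short_len, long_len, long_offset);
}

int main(void)
{
	crt_lengths(128, 128);	/* 1024-bit key on CEX2A */
	crt_lengths(256, 128);	/* 2048-bit key on CEX2A: offset 8 */
	crt_lengths(512, 256);	/* 4096-bit key on CEX3A: offset 8 */
	return 0;
}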
241
242/**
243 * Copy results from a type 80 reply message back to user space.
244 *
245 * @zdev: crypto device pointer
246 * @reply: reply AP message.
247 * @outputdata: pointer to user output data
248 * @outputdatalength: size of user output data
249 *
250 * Returns 0 on success or -EFAULT.
251 */
252static int convert_type80(struct zcrypt_device *zdev,
253 struct ap_message *reply,
254 char __user *outputdata,
255 unsigned int outputdatalength)
256{
257 struct type80_hdr *t80h = reply->message;
258 unsigned char *data;
259
260 if (t80h->len < sizeof(*t80h) + outputdatalength) {
261		/* The result is too short, the CEX2A card may not do that. */
262 zdev->online = 0;
263 return -EAGAIN; /* repeat the request on a different device. */
264 }
265 if (zdev->user_space_type == ZCRYPT_CEX2A)
266 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
267 else
268 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
269 data = reply->message + t80h->len - outputdatalength;
270 if (copy_to_user(outputdata, data, outputdatalength))
271 return -EFAULT;
272 return 0;
273}
274
275static int convert_response(struct zcrypt_device *zdev,
276 struct ap_message *reply,
277 char __user *outputdata,
278 unsigned int outputdatalength)
279{
280 /* Response type byte is the second byte in the response. */
281 switch (((unsigned char *) reply->message)[1]) {
282 case TYPE82_RSP_CODE:
283 case TYPE88_RSP_CODE:
284 return convert_error(zdev, reply);
285 case TYPE80_RSP_CODE:
286 return convert_type80(zdev, reply,
287 outputdata, outputdatalength);
288 default: /* Unknown response type, this should NEVER EVER happen */
289 zdev->online = 0;
290 return -EAGAIN; /* repeat the request on a different device. */
291 }
292}
293
294/**
295 * This function is called from the AP bus code after a crypto request
296 * "msg" has finished with the reply message "reply".
297 * It is called from tasklet context.
298 * @ap_dev: pointer to the AP device
299 * @msg: pointer to the AP message
300 * @reply: pointer to the AP reply message
301 */
302static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
303 struct ap_message *msg,
304 struct ap_message *reply)
305{
306 static struct error_hdr error_reply = {
307 .type = TYPE82_RSP_CODE,
308 .reply_code = REP82_ERROR_MACHINE_FAILURE,
309 };
310 struct type80_hdr *t80h;
311 int length;
312
313 /* Copy the reply message to the request message buffer. */
314 if (IS_ERR(reply)) {
315 memcpy(msg->message, &error_reply, sizeof(error_reply));
316 goto out;
317 }
318 t80h = reply->message;
319 if (t80h->type == TYPE80_RSP_CODE) {
320 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
321 length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len);
322 else
323 length = min(CEX3A_MAX_RESPONSE_SIZE, (int) t80h->len);
324 memcpy(msg->message, reply->message, length);
325 } else
326		memcpy(msg->message, reply->message, sizeof(error_reply));
327out:
328 complete((struct completion *) msg->private);
329}
330
331static atomic_t zcrypt_step = ATOMIC_INIT(0);
332
333/**
334 * The request distributor calls this function if it picked the CEX2A
335 * device to handle a modexpo request.
336 * @zdev: pointer to zcrypt_device structure that identifies the
337 * CEX2A device to the request distributor
338 * @mex: pointer to the modexpo request buffer
339 */
340static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
341 struct ica_rsa_modexpo *mex)
342{
343 struct ap_message ap_msg;
344 struct completion work;
345 int rc;
346
347 ap_init_message(&ap_msg);
348 if (zdev->user_space_type == ZCRYPT_CEX2A)
349 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
350 else
351 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
352 if (!ap_msg.message)
353 return -ENOMEM;
354 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
355 atomic_inc_return(&zcrypt_step);
356 ap_msg.private = &work;
357 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
358 if (rc)
359 goto out_free;
360 init_completion(&work);
361 ap_queue_message(zdev->ap_dev, &ap_msg);
362 rc = wait_for_completion_interruptible(&work);
363 if (rc == 0)
364 rc = convert_response(zdev, &ap_msg, mex->outputdata,
365 mex->outputdatalength);
366 else
367 /* Signal pending. */
368 ap_cancel_message(zdev->ap_dev, &ap_msg);
369out_free:
370 kfree(ap_msg.message);
371 return rc;
372}
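
The psmid ties a reply back to its request: the caller's pid occupies the upper 32 bits and the global step counter the lower 32. A small sketch of the packing (values are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long pid = 4711;	/* would be current->pid */
	unsigned int step = 42;		/* would be atomic_inc_return() */
	unsigned long long psmid = (pid << 32) + step;

	printf("psmid = 0x%016llx (pid %llu, step %u)\n",
	       psmid, psmid >> 32, (unsigned int)(psmid & 0xffffffff));
	return 0;
}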
373
374/**
375 * The request distributor calls this function if it picked the CEX2A
376 * device to handle a modexpo_crt request.
377 * @zdev: pointer to zcrypt_device structure that identifies the
378 * CEX2A device to the request distributor
379 * @crt: pointer to the modexpo_crt request buffer
380 */
381static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
382 struct ica_rsa_modexpo_crt *crt)
383{
384 struct ap_message ap_msg;
385 struct completion work;
386 int rc;
387
388 ap_init_message(&ap_msg);
389 if (zdev->user_space_type == ZCRYPT_CEX2A)
390 ap_msg.message = kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL);
391 else
392 ap_msg.message = kmalloc(CEX3A_MAX_MESSAGE_SIZE, GFP_KERNEL);
393 if (!ap_msg.message)
394 return -ENOMEM;
395 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
396 atomic_inc_return(&zcrypt_step);
397 ap_msg.private = &work;
398 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
399 if (rc)
400 goto out_free;
401 init_completion(&work);
402 ap_queue_message(zdev->ap_dev, &ap_msg);
403 rc = wait_for_completion_interruptible(&work);
404 if (rc == 0)
405 rc = convert_response(zdev, &ap_msg, crt->outputdata,
406 crt->outputdatalength);
407 else
408 /* Signal pending. */
409 ap_cancel_message(zdev->ap_dev, &ap_msg);
410out_free:
411 kfree(ap_msg.message);
412 return rc;
413}
414
415/**
416 * The crypto operations for a CEX2A card.
417 */
418static struct zcrypt_ops zcrypt_cex2a_ops = {
419 .rsa_modexpo = zcrypt_cex2a_modexpo,
420 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
421};
422
423/**
 * Probe function for CEX2A cards. It always accepts the AP device
 * since the bus_match already checked the hardware type.
 * @ap_dev: pointer to the AP device.
@@ -111,8 +452,7 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 		zdev->min_mod_size = CEX2A_MIN_MOD_SIZE;
 		zdev->max_mod_size = CEX2A_MAX_MOD_SIZE;
 		zdev->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
-		if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
-		    ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
+		if (ap_4096_commands_available(ap_dev->qid)) {
 			zdev->max_mod_size = CEX3A_MAX_MOD_SIZE;
 			zdev->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
 		}
@@ -120,18 +460,16 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 		zdev->speed_rating = CEX3A_SPEED_RATING;
 		break;
 	}
-	if (!zdev)
-		return -ENODEV;
-	zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
-					   MSGTYPE50_VARIANT_DEFAULT);
-	zdev->ap_dev = ap_dev;
-	zdev->online = 1;
-	ap_dev->reply = &zdev->reply;
-	ap_dev->private = zdev;
-	rc = zcrypt_device_register(zdev);
+	if (zdev != NULL) {
+		zdev->ap_dev = ap_dev;
+		zdev->ops = &zcrypt_cex2a_ops;
+		zdev->online = 1;
+		ap_dev->reply = &zdev->reply;
+		ap_dev->private = zdev;
+		rc = zcrypt_device_register(zdev);
+	}
 	if (rc) {
 		ap_dev->private = NULL;
-		zcrypt_msgtype_release(zdev->ops);
 		zcrypt_device_free(zdev);
 	}
 	return rc;
@@ -144,10 +482,8 @@ static int zcrypt_cex2a_probe(struct ap_device *ap_dev)
 static void zcrypt_cex2a_remove(struct ap_device *ap_dev)
 {
 	struct zcrypt_device *zdev = ap_dev->private;
-	struct zcrypt_ops *zops = zdev->ops;
 
 	zcrypt_device_unregister(zdev);
-	zcrypt_msgtype_release(zops);
 }
 
 int __init zcrypt_cex2a_init(void)
@@ -160,5 +496,7 @@ void __exit zcrypt_cex2a_exit(void)
 	ap_driver_unregister(&zcrypt_cex2a_driver);
 }
 
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
 module_init(zcrypt_cex2a_init);
 module_exit(zcrypt_cex2a_exit);
+#endif
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 0dce4b9af18..0350665810c 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,7 +1,9 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_cex2a.h
+ *
  * zcrypt 2.1.0
  *
- * Copyright IBM Corp. 2001, 2006
+ * Copyright (C) 2001, 2006 IBM Corporation
  * Author(s): Robert Burroughs
  *	      Eric Rossman (edrossma@us.ibm.com)
  *
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
deleted file mode 100644
index ce1226398ac..00000000000
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
4 */
5
6#include <linux/module.h>
7#include <linux/slab.h>
8#include <linux/init.h>
9#include <linux/err.h>
10#include <linux/atomic.h>
11#include <linux/uaccess.h>
12
13#include "ap_bus.h"
14#include "zcrypt_api.h"
15#include "zcrypt_msgtype6.h"
16#include "zcrypt_msgtype50.h"
17#include "zcrypt_error.h"
18#include "zcrypt_cex4.h"
19
20#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
21#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
22#define CEX4A_MAX_MOD_SIZE_4K 512 /* 4096 bits */
23
24#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
25#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
26
27#define CEX4A_SPEED_RATING 900 /* TODO new card, new speed rating */
28#define CEX4C_SPEED_RATING 6500 /* TODO new card, new speed rating */
29
30#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
31#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
32
33#define CEX4_CLEANUP_TIME (15*HZ)
34
35static struct ap_device_id zcrypt_cex4_ids[] = {
36 { AP_DEVICE(AP_DEVICE_TYPE_CEX4) },
37 { /* end of list */ },
38};
39
40MODULE_DEVICE_TABLE(ap, zcrypt_cex4_ids);
41MODULE_AUTHOR("IBM Corporation");
42MODULE_DESCRIPTION("CEX4 Cryptographic Card device driver, " \
43 "Copyright IBM Corp. 2012");
44MODULE_LICENSE("GPL");
45
46static int zcrypt_cex4_probe(struct ap_device *ap_dev);
47static void zcrypt_cex4_remove(struct ap_device *ap_dev);
48
49static struct ap_driver zcrypt_cex4_driver = {
50 .probe = zcrypt_cex4_probe,
51 .remove = zcrypt_cex4_remove,
52 .ids = zcrypt_cex4_ids,
53 .request_timeout = CEX4_CLEANUP_TIME,
54};
55
56/**
57 * Probe function for CEX4 cards. It always accepts the AP device
58 * since the bus_match already checked the hardware type.
59 * @ap_dev: pointer to the AP device.
60 */
61static int zcrypt_cex4_probe(struct ap_device *ap_dev)
62{
63 struct zcrypt_device *zdev = NULL;
64 int rc = 0;
65
66 switch (ap_dev->device_type) {
67 case AP_DEVICE_TYPE_CEX4:
68 if (ap_test_bit(&ap_dev->functions, AP_FUNC_ACCEL)) {
69 zdev = zcrypt_device_alloc(CEX4A_MAX_MESSAGE_SIZE);
70 if (!zdev)
71 return -ENOMEM;
72 zdev->type_string = "CEX4A";
73 zdev->user_space_type = ZCRYPT_CEX3A;
74 zdev->min_mod_size = CEX4A_MIN_MOD_SIZE;
75 if (ap_test_bit(&ap_dev->functions, AP_FUNC_MEX4K) &&
76 ap_test_bit(&ap_dev->functions, AP_FUNC_CRT4K)) {
77 zdev->max_mod_size =
78 CEX4A_MAX_MOD_SIZE_4K;
79 zdev->max_exp_bit_length =
80 CEX4A_MAX_MOD_SIZE_4K;
81 } else {
82 zdev->max_mod_size =
83 CEX4A_MAX_MOD_SIZE_2K;
84 zdev->max_exp_bit_length =
85 CEX4A_MAX_MOD_SIZE_2K;
86 }
87 zdev->short_crt = 1;
88 zdev->speed_rating = CEX4A_SPEED_RATING;
89 zdev->ops = zcrypt_msgtype_request(MSGTYPE50_NAME,
90 MSGTYPE50_VARIANT_DEFAULT);
91 } else if (ap_test_bit(&ap_dev->functions, AP_FUNC_COPRO)) {
92 zdev = zcrypt_device_alloc(CEX4C_MAX_MESSAGE_SIZE);
93 if (!zdev)
94 return -ENOMEM;
95 zdev->type_string = "CEX4C";
96 zdev->user_space_type = ZCRYPT_CEX3C;
97 zdev->min_mod_size = CEX4C_MIN_MOD_SIZE;
98 zdev->max_mod_size = CEX4C_MAX_MOD_SIZE;
99 zdev->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
100 zdev->short_crt = 0;
101 zdev->speed_rating = CEX4C_SPEED_RATING;
102 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
103 MSGTYPE06_VARIANT_DEFAULT);
104 }
105 break;
106 }
107 if (!zdev)
108 return -ENODEV;
109 zdev->ap_dev = ap_dev;
110 zdev->online = 1;
111 ap_dev->reply = &zdev->reply;
112 ap_dev->private = zdev;
113 rc = zcrypt_device_register(zdev);
114 if (rc) {
115 zcrypt_msgtype_release(zdev->ops);
116 ap_dev->private = NULL;
117 zcrypt_device_free(zdev);
118 }
119 return rc;
120}
121
122/**
123 * This is called to remove the extended CEX4 driver information
124 * if an AP device is removed.
125 */
126static void zcrypt_cex4_remove(struct ap_device *ap_dev)
127{
128 struct zcrypt_device *zdev = ap_dev->private;
129 struct zcrypt_ops *zops;
130
131 if (zdev) {
132 zops = zdev->ops;
133 zcrypt_device_unregister(zdev);
134 zcrypt_msgtype_release(zops);
135 }
136}
137
138int __init zcrypt_cex4_init(void)
139{
140 return ap_driver_register(&zcrypt_cex4_driver, THIS_MODULE, "cex4");
141}
142
143void __exit zcrypt_cex4_exit(void)
144{
145 ap_driver_unregister(&zcrypt_cex4_driver);
146}
147
148module_init(zcrypt_cex4_init);
149module_exit(zcrypt_cex4_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex4.h b/drivers/s390/crypto/zcrypt_cex4.h
deleted file mode 100644
index 719571375cc..00000000000
--- a/drivers/s390/crypto/zcrypt_cex4.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
4 */
5
6#ifndef _ZCRYPT_CEX4_H_
7#define _ZCRYPT_CEX4_H_
8
9int zcrypt_cex4_init(void);
10void zcrypt_cex4_exit(void);
11
12#endif /* _ZCRYPT_CEX4_H_ */
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
deleted file mode 100644
index 841ea72e4a4..00000000000
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ /dev/null
@@ -1,59 +0,0 @@
1/*
2 * Copyright IBM Corp. 2012
3 * Author(s): Holger Dengler (hd@linux.vnet.ibm.com)
4 */
5#ifndef ZCRYPT_DEBUG_H
6#define ZCRYPT_DEBUG_H
7
8#include <asm/debug.h>
9#include "zcrypt_api.h"
10
11/* that gives us 15 characters in the text event views */
12#define ZCRYPT_DBF_LEN 16
13
14/* sort out low debug levels early to avoid wasted sprints */
15static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
16{
17 return (level <= dbf_grp->level);
18}
19
20#define DBF_ERR 3 /* error conditions */
21#define DBF_WARN 4 /* warning conditions */
22#define DBF_INFO 6 /* informational */
23
24#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
25
26#define ZCRYPT_DBF_COMMON(level, text...) \
27 do { \
28 if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
29 char debug_buffer[ZCRYPT_DBF_LEN]; \
30 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
31 debug_text_event(zcrypt_dbf_common, level, \
32 debug_buffer); \
33 } \
34 } while (0)
35
36#define ZCRYPT_DBF_DEVICES(level, text...) \
37 do { \
38 if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
39 char debug_buffer[ZCRYPT_DBF_LEN]; \
40 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
41 debug_text_event(zcrypt_dbf_devices, level, \
42 debug_buffer); \
43 } \
44 } while (0)
45
46#define ZCRYPT_DBF_DEV(level, device, text...) \
47 do { \
48 if (zcrypt_dbf_passes(device->dbf_area, level)) { \
49 char debug_buffer[ZCRYPT_DBF_LEN]; \
50 snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
51 debug_text_event(device->dbf_area, level, \
52 debug_buffer); \
53 } \
54 } while (0)
55
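
A rough user-space analogue of the level gating these macros implement (hypothetical demo, not the kernel debug feature): messages are formatted into a small fixed buffer only when they pass the current level.

#include <stdio.h>
#include <stdarg.h>

#define DBF_LEN 16			/* mirrors ZCRYPT_DBF_LEN */

static int current_level = 4;		/* DBF_WARN */

static void dbf_event(int level, const char *fmt, ...)
{
	char buf[DBF_LEN];
	va_list ap;

	if (level > current_level)	/* sorted out early */
		return;
	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	printf("dbf: %s\n", buf);
}

int main(void)
{
	dbf_event(3, "dev%04xo%drc%d", 0x002a, 0, 8);	/* logged */
	dbf_event(6, "dropped");			/* filtered out */
	return 0;
}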
56int zcrypt_debug_init(void);
57void zcrypt_debug_exit(void);
58
59#endif /* ZCRYPT_DEBUG_H */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 0079b661721..03ba27f05f9 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,7 +1,9 @@
 /*
+ * linux/drivers/s390/crypto/zcrypt_error.h
+ *
  * zcrypt 2.1.0
  *
- * Copyright IBM Corp. 2001, 2006
+ * Copyright (C) 2001, 2006 IBM Corporation
  * Author(s): Robert Burroughs
  *	      Eric Rossman (edrossma@us.ibm.com)
  *
@@ -26,8 +28,6 @@
 #ifndef _ZCRYPT_ERROR_H_
 #define _ZCRYPT_ERROR_H_
 
-#include <linux/atomic.h>
-#include "zcrypt_debug.h"
 #include "zcrypt_api.h"
 
 /**
@@ -110,27 +110,16 @@ static inline int convert_error(struct zcrypt_device *zdev,
 	 * and then repeat the request.
 	 */
 	WARN_ON(1);
-	atomic_set(&zcrypt_rescan_req, 1);
 	zdev->online = 0;
-	ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-		       zdev->ap_dev->qid,
-		       zdev->online, ehdr->reply_code);
 	return -EAGAIN;
 case REP82_ERROR_TRANSPORT_FAIL:
 case REP82_ERROR_MACHINE_FAILURE:
 //   REP88_ERROR_MODULE_FAILURE		// '10' CEX2A
 	/* If a card fails disable it and repeat the request. */
-	atomic_set(&zcrypt_rescan_req, 1);
 	zdev->online = 0;
-	ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-		       zdev->ap_dev->qid,
-		       zdev->online, ehdr->reply_code);
 	return -EAGAIN;
 default:
 	zdev->online = 0;
-	ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-		       zdev->ap_dev->qid,
-		       zdev->online, ehdr->reply_code);
 	return -EAGAIN; /* repeat the request on a different device. */
 	}
 }
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
deleted file mode 100644
index 7c522f338bd..00000000000
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ /dev/null
@@ -1,517 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/slab.h>
30#include <linux/init.h>
31#include <linux/err.h>
32#include <linux/atomic.h>
33#include <linux/uaccess.h>
34
35#include "ap_bus.h"
36#include "zcrypt_api.h"
37#include "zcrypt_error.h"
38#include "zcrypt_msgtype50.h"
39
40#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
41
42#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
43
44#define CEX3A_MAX_RESPONSE_SIZE	0x210	/* 512 byte modulus
45 * (max outputdatalength) +
46 * type80_hdr*/
47
48MODULE_AUTHOR("IBM Corporation");
49MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
50 "Copyright IBM Corp. 2001, 2012");
51MODULE_LICENSE("GPL");
52
53static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *,
54 struct ap_message *);
55
56/**
57 * The type 50 message family is associated with a CEX2A card.
58 *
59 * The four members of the family are described below.
60 *
61 * Note that all unsigned char arrays are right-justified and left-padded
62 * with zeroes.
63 *
64 * Note that all reserved fields must be zeroes.
65 */
66struct type50_hdr {
67 unsigned char reserved1;
68 unsigned char msg_type_code; /* 0x50 */
69 unsigned short msg_len;
70 unsigned char reserved2;
71 unsigned char ignored;
72 unsigned short reserved3;
73} __packed;
74
75#define TYPE50_TYPE_CODE 0x50
76
77#define TYPE50_MEB1_FMT 0x0001
78#define TYPE50_MEB2_FMT 0x0002
79#define TYPE50_MEB3_FMT 0x0003
80#define TYPE50_CRB1_FMT 0x0011
81#define TYPE50_CRB2_FMT 0x0012
82#define TYPE50_CRB3_FMT 0x0013
83
84/* Mod-Exp, with a small modulus */
85struct type50_meb1_msg {
86 struct type50_hdr header;
87 unsigned short keyblock_type; /* 0x0001 */
88 unsigned char reserved[6];
89 unsigned char exponent[128];
90 unsigned char modulus[128];
91 unsigned char message[128];
92} __packed;
93
94/* Mod-Exp, with a large modulus */
95struct type50_meb2_msg {
96 struct type50_hdr header;
97 unsigned short keyblock_type; /* 0x0002 */
98 unsigned char reserved[6];
99 unsigned char exponent[256];
100 unsigned char modulus[256];
101 unsigned char message[256];
102} __packed;
103
104/* Mod-Exp, with a larger modulus */
105struct type50_meb3_msg {
106 struct type50_hdr header;
107 unsigned short keyblock_type; /* 0x0003 */
108 unsigned char reserved[6];
109 unsigned char exponent[512];
110 unsigned char modulus[512];
111 unsigned char message[512];
112} __packed;
113
114/* CRT, with a small modulus */
115struct type50_crb1_msg {
116 struct type50_hdr header;
117 unsigned short keyblock_type; /* 0x0011 */
118 unsigned char reserved[6];
119 unsigned char p[64];
120 unsigned char q[64];
121 unsigned char dp[64];
122 unsigned char dq[64];
123 unsigned char u[64];
124 unsigned char message[128];
125} __packed;
126
127/* CRT, with a large modulus */
128struct type50_crb2_msg {
129 struct type50_hdr header;
130 unsigned short keyblock_type; /* 0x0012 */
131 unsigned char reserved[6];
132 unsigned char p[128];
133 unsigned char q[128];
134 unsigned char dp[128];
135 unsigned char dq[128];
136 unsigned char u[128];
137 unsigned char message[256];
138} __packed;
139
140/* CRT, with a larger modulus */
141struct type50_crb3_msg {
142 struct type50_hdr header;
143 unsigned short keyblock_type; /* 0x0013 */
144 unsigned char reserved[6];
145 unsigned char p[256];
146 unsigned char q[256];
147 unsigned char dp[256];
148 unsigned char dq[256];
149 unsigned char u[256];
150 unsigned char message[512];
151} __packed;
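
The MSGTYPE50_CRB2_MAX_MSG_SIZE (0x390) and MSGTYPE50_CRB3_MAX_MSG_SIZE (0x710) constants in zcrypt_msgtype50.h follow directly from these packed layouts: an 8-byte header, a 2-byte keyblock type, 6 reserved bytes, five key parts, and the message. A compile-time sketch of that arithmetic (C11):

/* Sketch: the packed CRB layouts add up to the MAX_MSG_SIZE
 * constants: header (8) + keyblock_type (2) + reserved (6)
 * + 5 key parts + message. */
_Static_assert(8 + 2 + 6 + 5 * 128 + 256 == 0x390,
	       "type50_crb2_msg is 0x390 bytes");
_Static_assert(8 + 2 + 6 + 5 * 256 + 512 == 0x710,
	       "type50_crb3_msg is 0x710 bytes");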
152
153/**
154 * The type 80 response family is associated with a CEX2A card.
155 *
156 * Note that all unsigned char arrays are right-justified and left-padded
157 * with zeroes.
158 *
159 * Note that all reserved fields must be zeroes.
160 */
161
162#define TYPE80_RSP_CODE 0x80
163
164struct type80_hdr {
165 unsigned char reserved1;
166 unsigned char type; /* 0x80 */
167 unsigned short len;
168 unsigned char code; /* 0x00 */
169 unsigned char reserved2[3];
170 unsigned char reserved3[8];
171} __packed;
172
173/**
174 * Convert an ICAMEX message to a type50 MEX message.
175 *
176 * @zdev: crypto device pointer
177 * @ap_msg: pointer to AP message
178 * @mex: pointer to user input data
179 *
180 * Returns 0 on success or -EFAULT.
181 */
182static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
183 struct ap_message *ap_msg,
184 struct ica_rsa_modexpo *mex)
185{
186 unsigned char *mod, *exp, *inp;
187 int mod_len;
188
189 mod_len = mex->inputdatalength;
190
191 if (mod_len <= 128) {
192 struct type50_meb1_msg *meb1 = ap_msg->message;
193 memset(meb1, 0, sizeof(*meb1));
194 ap_msg->length = sizeof(*meb1);
195 meb1->header.msg_type_code = TYPE50_TYPE_CODE;
196 meb1->header.msg_len = sizeof(*meb1);
197 meb1->keyblock_type = TYPE50_MEB1_FMT;
198 mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
199 exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
200 inp = meb1->message + sizeof(meb1->message) - mod_len;
201 } else if (mod_len <= 256) {
202 struct type50_meb2_msg *meb2 = ap_msg->message;
203 memset(meb2, 0, sizeof(*meb2));
204 ap_msg->length = sizeof(*meb2);
205 meb2->header.msg_type_code = TYPE50_TYPE_CODE;
206 meb2->header.msg_len = sizeof(*meb2);
207 meb2->keyblock_type = TYPE50_MEB2_FMT;
208 mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
209 exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
210 inp = meb2->message + sizeof(meb2->message) - mod_len;
211 } else {
212		/* mod_len > 256 bytes, i.e. a 4096 bit RSA key */
213 struct type50_meb3_msg *meb3 = ap_msg->message;
214 memset(meb3, 0, sizeof(*meb3));
215 ap_msg->length = sizeof(*meb3);
216 meb3->header.msg_type_code = TYPE50_TYPE_CODE;
217 meb3->header.msg_len = sizeof(*meb3);
218 meb3->keyblock_type = TYPE50_MEB3_FMT;
219 mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
220 exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
221 inp = meb3->message + sizeof(meb3->message) - mod_len;
222 }
223
224 if (copy_from_user(mod, mex->n_modulus, mod_len) ||
225 copy_from_user(exp, mex->b_key, mod_len) ||
226 copy_from_user(inp, mex->inputdata, mod_len))
227 return -EFAULT;
228 return 0;
229}
230
231/**
232 * Convert an ICACRT message to a type50 CRT message.
233 *
234 * @zdev: crypto device pointer
235 * @ap_msg: pointer to AP message
236 * @crt: pointer to user input data
237 *
238 * Returns 0 on success or -EFAULT.
239 */
240static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
241 struct ap_message *ap_msg,
242 struct ica_rsa_modexpo_crt *crt)
243{
244 int mod_len, short_len;
245 unsigned char *p, *q, *dp, *dq, *u, *inp;
246
247 mod_len = crt->inputdatalength;
248 short_len = mod_len / 2;
249
250 /*
251 * CEX2A and CEX3A w/o FW update can handle requests up to
252 * 256 byte modulus (2k keys).
253 * CEX3A with FW update and CEX4A cards are able to handle
254 * 512 byte modulus (4k keys).
255 */
256 if (mod_len <= 128) { /* up to 1024 bit key size */
257 struct type50_crb1_msg *crb1 = ap_msg->message;
258 memset(crb1, 0, sizeof(*crb1));
259 ap_msg->length = sizeof(*crb1);
260 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
261 crb1->header.msg_len = sizeof(*crb1);
262 crb1->keyblock_type = TYPE50_CRB1_FMT;
263 p = crb1->p + sizeof(crb1->p) - short_len;
264 q = crb1->q + sizeof(crb1->q) - short_len;
265 dp = crb1->dp + sizeof(crb1->dp) - short_len;
266 dq = crb1->dq + sizeof(crb1->dq) - short_len;
267 u = crb1->u + sizeof(crb1->u) - short_len;
268 inp = crb1->message + sizeof(crb1->message) - mod_len;
269 } else if (mod_len <= 256) { /* up to 2048 bit key size */
270 struct type50_crb2_msg *crb2 = ap_msg->message;
271 memset(crb2, 0, sizeof(*crb2));
272 ap_msg->length = sizeof(*crb2);
273 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
274 crb2->header.msg_len = sizeof(*crb2);
275 crb2->keyblock_type = TYPE50_CRB2_FMT;
276 p = crb2->p + sizeof(crb2->p) - short_len;
277 q = crb2->q + sizeof(crb2->q) - short_len;
278 dp = crb2->dp + sizeof(crb2->dp) - short_len;
279 dq = crb2->dq + sizeof(crb2->dq) - short_len;
280 u = crb2->u + sizeof(crb2->u) - short_len;
281 inp = crb2->message + sizeof(crb2->message) - mod_len;
282 } else if ((mod_len <= 512) && /* up to 4096 bit key size */
283 (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
284 struct type50_crb3_msg *crb3 = ap_msg->message;
285 memset(crb3, 0, sizeof(*crb3));
286 ap_msg->length = sizeof(*crb3);
287 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
288 crb3->header.msg_len = sizeof(*crb3);
289 crb3->keyblock_type = TYPE50_CRB3_FMT;
290 p = crb3->p + sizeof(crb3->p) - short_len;
291 q = crb3->q + sizeof(crb3->q) - short_len;
292 dp = crb3->dp + sizeof(crb3->dp) - short_len;
293 dq = crb3->dq + sizeof(crb3->dq) - short_len;
294 u = crb3->u + sizeof(crb3->u) - short_len;
295 inp = crb3->message + sizeof(crb3->message) - mod_len;
296 } else
297 return -EINVAL;
298
299 /*
300	 * Correct the offsets of p, bp and mult_inv according to the
301	 * zcrypt.h convention: blocks are right-aligned, skip the pad bytes
302 */
303 if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
304 copy_from_user(q, crt->nq_prime, short_len) ||
305 copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
306 copy_from_user(dq, crt->bq_key, short_len) ||
307 copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
308 copy_from_user(inp, crt->inputdata, mod_len))
309 return -EFAULT;
310
311 return 0;
312}
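
MSGTYPE_ADJUSTMENT (0x08, defined in zcrypt_msgtype50.h) accounts for the 8 leading bytes that the long CRT operands carry in the ioctl layout but that type50 has no room for; the copies above therefore start 8 bytes into np_prime, bp_key and u_mult_inv. A sketch of the offset (buffer sizes assumed per the zcrypt.h convention, values illustrative):

#include <stdio.h>
#include <string.h>

#define MSGTYPE_ADJUSTMENT 0x08	/* leading type04 extension bytes */

int main(void)
{
	/* Hypothetical np_prime buffer for a 128-byte modulus:
	 * short_len (64) payload bytes preceded by 8 pad bytes. */
	unsigned char np_prime[64 + MSGTYPE_ADJUSTMENT];
	unsigned char p[64];
	int short_len = 64;

	memset(np_prime, 0, sizeof(np_prime));
	np_prime[MSGTYPE_ADJUSTMENT] = 0xC3;	/* first payload byte */

	/* Skip the extension bytes, exactly as the copy_from_user()
	 * calls above do with crt->np_prime + MSGTYPE_ADJUSTMENT. */
	memcpy(p, np_prime + MSGTYPE_ADJUSTMENT, short_len);
	printf("p[0] = 0x%02x\n", p[0]);
	return 0;
}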
313
314/**
315 * Copy results from a type 80 reply message back to user space.
316 *
317 * @zdev: crypto device pointer
318 * @reply: reply AP message.
319 * @outputdata: pointer to user output data
320 * @outputdatalength: size of user output data
321 *
322 * Returns 0 on success or -EFAULT.
323 */
324static int convert_type80(struct zcrypt_device *zdev,
325 struct ap_message *reply,
326 char __user *outputdata,
327 unsigned int outputdatalength)
328{
329 struct type80_hdr *t80h = reply->message;
330 unsigned char *data;
331
332 if (t80h->len < sizeof(*t80h) + outputdatalength) {
333		/* The result is too short, the CEX2A card may not do that. */
334 zdev->online = 0;
335 return -EAGAIN; /* repeat the request on a different device. */
336 }
337 if (zdev->user_space_type == ZCRYPT_CEX2A)
338 BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
339 else
340 BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
341 data = reply->message + t80h->len - outputdatalength;
342 if (copy_to_user(outputdata, data, outputdatalength))
343 return -EFAULT;
344 return 0;
345}
346
347static int convert_response(struct zcrypt_device *zdev,
348 struct ap_message *reply,
349 char __user *outputdata,
350 unsigned int outputdatalength)
351{
352 /* Response type byte is the second byte in the response. */
353 switch (((unsigned char *) reply->message)[1]) {
354 case TYPE82_RSP_CODE:
355 case TYPE88_RSP_CODE:
356 return convert_error(zdev, reply);
357 case TYPE80_RSP_CODE:
358 return convert_type80(zdev, reply,
359 outputdata, outputdatalength);
360 default: /* Unknown response type, this should NEVER EVER happen */
361 zdev->online = 0;
362 return -EAGAIN; /* repeat the request on a different device. */
363 }
364}
365
366/**
367 * This function is called from the AP bus code after a crypto request
368 * "msg" has finished with the reply message "reply".
369 * It is called from tasklet context.
370 * @ap_dev: pointer to the AP device
371 * @msg: pointer to the AP message
372 * @reply: pointer to the AP reply message
373 */
374static void zcrypt_cex2a_receive(struct ap_device *ap_dev,
375 struct ap_message *msg,
376 struct ap_message *reply)
377{
378 static struct error_hdr error_reply = {
379 .type = TYPE82_RSP_CODE,
380 .reply_code = REP82_ERROR_MACHINE_FAILURE,
381 };
382 struct type80_hdr *t80h;
383 int length;
384
385 /* Copy the reply message to the request message buffer. */
386 if (IS_ERR(reply)) {
387 memcpy(msg->message, &error_reply, sizeof(error_reply));
388 goto out;
389 }
390 t80h = reply->message;
391 if (t80h->type == TYPE80_RSP_CODE) {
392 if (ap_dev->device_type == AP_DEVICE_TYPE_CEX2A)
393 length = min_t(int,
394 CEX2A_MAX_RESPONSE_SIZE, t80h->len);
395 else
396 length = min_t(int,
397 CEX3A_MAX_RESPONSE_SIZE, t80h->len);
398 memcpy(msg->message, reply->message, length);
399 } else
400 memcpy(msg->message, reply->message, sizeof(error_reply));
401out:
402 complete((struct completion *) msg->private);
403}
404
405static atomic_t zcrypt_step = ATOMIC_INIT(0);
406
407/**
408 * The request distributor calls this function if it picked the CEX2A
409 * device to handle a modexpo request.
410 * @zdev: pointer to zcrypt_device structure that identifies the
411 * CEX2A device to the request distributor
412 * @mex: pointer to the modexpo request buffer
413 */
414static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev,
415 struct ica_rsa_modexpo *mex)
416{
417 struct ap_message ap_msg;
418 struct completion work;
419 int rc;
420
421 ap_init_message(&ap_msg);
422 if (zdev->user_space_type == ZCRYPT_CEX2A)
423 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
424 GFP_KERNEL);
425 else
426 ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
427 GFP_KERNEL);
428 if (!ap_msg.message)
429 return -ENOMEM;
430 ap_msg.receive = zcrypt_cex2a_receive;
431 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
432 atomic_inc_return(&zcrypt_step);
433 ap_msg.private = &work;
434 rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex);
435 if (rc)
436 goto out_free;
437 init_completion(&work);
438 ap_queue_message(zdev->ap_dev, &ap_msg);
439 rc = wait_for_completion_interruptible(&work);
440 if (rc == 0)
441 rc = convert_response(zdev, &ap_msg, mex->outputdata,
442 mex->outputdatalength);
443 else
444 /* Signal pending. */
445 ap_cancel_message(zdev->ap_dev, &ap_msg);
446out_free:
447 kfree(ap_msg.message);
448 return rc;
449}
450
451/**
452 * The request distributor calls this function if it picked the CEX2A
453 * device to handle a modexpo_crt request.
454 * @zdev: pointer to zcrypt_device structure that identifies the
455 * CEX2A device to the request distributor
456 * @crt: pointer to the modexpo_crt request buffer
457 */
458static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev,
459 struct ica_rsa_modexpo_crt *crt)
460{
461 struct ap_message ap_msg;
462 struct completion work;
463 int rc;
464
465 ap_init_message(&ap_msg);
466 if (zdev->user_space_type == ZCRYPT_CEX2A)
467 ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
468 GFP_KERNEL);
469 else
470 ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
471 GFP_KERNEL);
472 if (!ap_msg.message)
473 return -ENOMEM;
474 ap_msg.receive = zcrypt_cex2a_receive;
475 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
476 atomic_inc_return(&zcrypt_step);
477 ap_msg.private = &work;
478 rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt);
479 if (rc)
480 goto out_free;
481 init_completion(&work);
482 ap_queue_message(zdev->ap_dev, &ap_msg);
483 rc = wait_for_completion_interruptible(&work);
484 if (rc == 0)
485 rc = convert_response(zdev, &ap_msg, crt->outputdata,
486 crt->outputdatalength);
487 else
488 /* Signal pending. */
489 ap_cancel_message(zdev->ap_dev, &ap_msg);
490out_free:
491 kfree(ap_msg.message);
492 return rc;
493}
494
495/**
496 * The crypto operations for message type 50.
497 */
498static struct zcrypt_ops zcrypt_msgtype50_ops = {
499 .rsa_modexpo = zcrypt_cex2a_modexpo,
500 .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
501 .owner = THIS_MODULE,
502 .variant = MSGTYPE50_VARIANT_DEFAULT,
503};
504
505int __init zcrypt_msgtype50_init(void)
506{
507 zcrypt_msgtype_register(&zcrypt_msgtype50_ops);
508 return 0;
509}
510
511void __exit zcrypt_msgtype50_exit(void)
512{
513 zcrypt_msgtype_unregister(&zcrypt_msgtype50_ops);
514}
515
516module_init(zcrypt_msgtype50_init);
517module_exit(zcrypt_msgtype50_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
deleted file mode 100644
index 0a66e4aeeb5..00000000000
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _ZCRYPT_MSGTYPE50_H_
28#define _ZCRYPT_MSGTYPE50_H_
29
30#define MSGTYPE50_NAME "zcrypt_msgtype50"
31#define MSGTYPE50_VARIANT_DEFAULT 0
32
33#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
34#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
35
36#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
37
38int zcrypt_msgtype50_init(void);
39void zcrypt_msgtype50_exit(void);
40
41#endif /* _ZCRYPT_MSGTYPE50_H_ */
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
deleted file mode 100644
index 7d97fa5a26d..00000000000
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ /dev/null
@@ -1,856 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/err.h>
31#include <linux/delay.h>
32#include <linux/slab.h>
33#include <linux/atomic.h>
34#include <linux/uaccess.h>
35
36#include "ap_bus.h"
37#include "zcrypt_api.h"
38#include "zcrypt_error.h"
39#include "zcrypt_msgtype6.h"
40#include "zcrypt_cca_key.h"
41
42#define PCIXCC_MIN_MOD_SIZE_OLD 64 /* 512 bits */
43#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
44
45#define CEIL4(x) ((((x)+3)/4)*4)
46
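
CEIL4 rounds a length up to the next multiple of four using integer division, which the XCRB path below relies on for control-block alignment. A quick stand-alone check of the same macro:

#include <assert.h>

#define CEIL4(x) ((((x)+3)/4)*4)

int main(void)
{
	/* Round up to the next multiple of four via integer division. */
	assert(CEIL4(0) == 0);
	assert(CEIL4(1) == 4);
	assert(CEIL4(4) == 4);
	assert(CEIL4(5) == 8);
	return 0;
}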
47struct response_type {
48 struct completion work;
49 int type;
50};
51#define PCIXCC_RESPONSE_TYPE_ICA 0
52#define PCIXCC_RESPONSE_TYPE_XCRB 1
53
54MODULE_AUTHOR("IBM Corporation");
55MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
56 "Copyright IBM Corp. 2001, 2012");
57MODULE_LICENSE("GPL");
58
59static void zcrypt_msgtype6_receive(struct ap_device *, struct ap_message *,
60 struct ap_message *);
61
62/**
63 * CPRB
64 * Note that all shorts, ints and longs are little-endian.
65 * All pointer fields are 32 bits long and mean nothing (the buffers follow the CPRB inline).
66 *
67 * A request CPRB is followed by a request_parameter_block.
68 *
69 * The request (or reply) parameter block is organized thus:
70 * function code
71 * VUD block
72 * key block
73 */
74struct CPRB {
75 unsigned short cprb_len; /* CPRB length */
76 unsigned char cprb_ver_id; /* CPRB version id. */
77 unsigned char pad_000; /* Alignment pad byte. */
78 unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
79 unsigned char srpi_verb; /* SRPI verb type */
80 unsigned char flags; /* flags */
81 unsigned char func_id[2]; /* function id */
82 unsigned char checkpoint_flag; /* */
83 unsigned char resv2; /* reserved */
84 unsigned short req_parml; /* request parameter buffer */
85 /* length 16-bit little endian */
86 unsigned char req_parmp[4]; /* request parameter buffer *
87 * pointer (means nothing: the *
88 * parameter buffer follows *
89 * the CPRB). */
90 unsigned char req_datal[4]; /* request data buffer */
91 /* length ULELONG */
92 unsigned char req_datap[4]; /* request data buffer */
93 /* pointer */
94 unsigned short rpl_parml; /* reply parameter buffer */
95 /* length 16-bit little endian */
96 unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
97 unsigned char rpl_parmp[4]; /* reply parameter buffer *
98 * pointer (means nothing: the *
99 * parameter buffer follows *
100 * the CPRB). */
101 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
102 unsigned char rpl_datap[4]; /* reply data buffer */
103 /* pointer */
104 unsigned short ccp_rscode; /* server reason code ULESHORT */
105 unsigned short ccp_rtcode; /* server return code ULESHORT */
106 unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
107 unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
108 unsigned char repd_datal[4]; /* replied data length ULELONG */
109 unsigned char req_pc[2]; /* PC identifier */
110 unsigned char res_origin[8]; /* resource origin */
111 unsigned char mac_value[8]; /* Mac Value */
112 unsigned char logon_id[8]; /* Logon Identifier */
113 unsigned char usage_domain[2]; /* cdx */
114 unsigned char resv3[18]; /* reserved for requestor */
115 unsigned short svr_namel; /* server name length ULESHORT */
116 unsigned char svr_name[8]; /* server name */
117} __packed;
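
The numeric CPRB fields flagged LELONG/ULESHORT above are little-endian byte arrays, while s390 itself is big-endian, so they must be assembled byte by byte. A hypothetical helper sketching one such read (le32_from_bytes is not a driver function):

#include <stdio.h>

/* Assemble a 32-bit little-endian field such as req_datal byte by
 * byte, independent of host endianness. */
static unsigned int le32_from_bytes(const unsigned char b[4])
{
	return (unsigned int)b[0] | ((unsigned int)b[1] << 8) |
	       ((unsigned int)b[2] << 16) | ((unsigned int)b[3] << 24);
}

int main(void)
{
	unsigned char req_datal[4] = { 0x10, 0x02, 0x00, 0x00 };

	printf("req_datal = %u\n", le32_from_bytes(req_datal)); /* 528 */
	return 0;
}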
118
119struct function_and_rules_block {
120 unsigned char function_code[2];
121 unsigned short ulen;
122 unsigned char only_rule[8];
123} __packed;
124
125/**
126 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
127 * card in a type6 message. The 3 fields that must be filled in at execution
128 * time are req_parml, rpl_parml and usage_domain.
129 * Everything about this interface is ascii/big-endian, since the
130 * device does *not* have 'Intel inside'.
131 *
132 * The CPRBX is followed immediately by the parm block.
133 * The parm block contains:
134 * - function code ('PD' 0x5044 or 'PK' 0x504B)
135 * - rule block (one of:)
136 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
137 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
138 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
139 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
140 * - VUD block
141 */
142static struct CPRBX static_cprbx = {
143 .cprb_len = 0x00DC,
144 .cprb_ver_id = 0x02,
145 .func_id = {0x54, 0x32},
146};
147
148/**
150 * Convert an ICAMEX message to a type6 MEX message.
150 *
151 * @zdev: crypto device pointer
152 * @ap_msg: pointer to AP message
153 * @mex: pointer to user input data
154 *
155 * Returns 0 on success or -EFAULT.
156 */
157static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
158 struct ap_message *ap_msg,
159 struct ica_rsa_modexpo *mex)
160{
161 static struct type6_hdr static_type6_hdrX = {
162 .type = 0x06,
163 .offset1 = 0x00000058,
164 .agent_id = {'C', 'A',},
165 .function_code = {'P', 'K'},
166 };
167 static struct function_and_rules_block static_pke_fnr = {
168 .function_code = {'P', 'K'},
169 .ulen = 10,
170 .only_rule = {'M', 'R', 'P', ' ', ' ', ' ', ' ', ' '}
171 };
172 static struct function_and_rules_block static_pke_fnr_MCL2 = {
173 .function_code = {'P', 'K'},
174 .ulen = 10,
175 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
176 };
177 struct {
178 struct type6_hdr hdr;
179 struct CPRBX cprbx;
180 struct function_and_rules_block fr;
181 unsigned short length;
182 char text[0];
183 } __packed * msg = ap_msg->message;
184 int size;
185
186 /* VUD.ciphertext */
187 msg->length = mex->inputdatalength + 2;
188 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
189 return -EFAULT;
190
191 /* Set up key which is located after the variable length text. */
192 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
193 if (size < 0)
194 return size;
195 size += sizeof(*msg) + mex->inputdatalength;
196
197 /* message header, cprbx and f&r */
198 msg->hdr = static_type6_hdrX;
199 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
200 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
201
202 msg->cprbx = static_cprbx;
203 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
204 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
205
206 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
207 static_pke_fnr_MCL2 : static_pke_fnr;
208
209 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
210
211 ap_msg->length = size;
212 return 0;
213}
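
A sketch of the length bookkeeping just performed; the struct sizes below are illustrative placeholders, not the real packed sizeof() values:

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 56, cprbx_len = 220;	/* assumed sizes */
	unsigned int fnr_len = 12, len_field = 2;	/* assumed sizes */
	unsigned int inputdatalength = 256, key_len = 320;
	unsigned int size = hdr_len + cprbx_len + fnr_len + len_field +
			    inputdatalength + key_len;

	/* ToCardLen1 counts everything after the type6 header ... */
	printf("ToCardLen1 = %u\n", size - hdr_len);
	/* ... and req_parml everything after the CPRBX as well. */
	printf("req_parml  = %u\n", size - hdr_len - cprbx_len);
	return 0;
}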
214
215/**
216 * Convert an ICACRT message to a type6 CRT message.
217 *
218 * @zdev: crypto device pointer
219 * @ap_msg: pointer to AP message
220 * @crt: pointer to user input data
221 *
222 * Returns 0 on success or -EFAULT.
223 */
224static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
225 struct ap_message *ap_msg,
226 struct ica_rsa_modexpo_crt *crt)
227{
228 static struct type6_hdr static_type6_hdrX = {
229 .type = 0x06,
230 .offset1 = 0x00000058,
231 .agent_id = {'C', 'A',},
232 .function_code = {'P', 'D'},
233 };
234 static struct function_and_rules_block static_pkd_fnr = {
235 .function_code = {'P', 'D'},
236 .ulen = 10,
237 .only_rule = {'Z', 'E', 'R', 'O', '-', 'P', 'A', 'D'}
238 };
239
240 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
241 .function_code = {'P', 'D'},
242 .ulen = 10,
243 .only_rule = {'P', 'K', 'C', 'S', '-', '1', '.', '2'}
244 };
245 struct {
246 struct type6_hdr hdr;
247 struct CPRBX cprbx;
248 struct function_and_rules_block fr;
249 unsigned short length;
250 char text[0];
251 } __packed * msg = ap_msg->message;
252 int size;
253
254 /* VUD.ciphertext */
255 msg->length = crt->inputdatalength + 2;
256 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
257 return -EFAULT;
258
259 /* Set up key which is located after the variable length text. */
260 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
261 if (size < 0)
262 return size;
263 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
264
265 /* message header, cprbx and f&r */
266 msg->hdr = static_type6_hdrX;
267 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
268 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
269
270 msg->cprbx = static_cprbx;
271 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
272 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
273 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
274
275 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
276 static_pkd_fnr_MCL2 : static_pkd_fnr;
277
278 ap_msg->length = size;
279 return 0;
280}
281
282/**
283 * Convert an XCRB message to a type6 CPRB message.
284 *
285 * @zdev: crypto device pointer
286 * @ap_msg: pointer to AP message
287 * @xcRB: pointer to user input data
288 *
289 * Returns 0 on success or -EFAULT, -EINVAL.
290 */
291struct type86_fmt2_msg {
292 struct type86_hdr hdr;
293 struct type86_fmt2_ext fmt2;
294} __packed;
295
296static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
297 struct ap_message *ap_msg,
298 struct ica_xcRB *xcRB)
299{
300 static struct type6_hdr static_type6_hdrX = {
301 .type = 0x06,
302 .offset1 = 0x00000058,
303 };
304 struct {
305 struct type6_hdr hdr;
306 struct CPRBX cprbx;
307 } __packed * msg = ap_msg->message;
308
309 int rcblen = CEIL4(xcRB->request_control_blk_length);
310 int replylen;
311 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
312 char *function_code;
313
314 /* length checks */
315 ap_msg->length = sizeof(struct type6_hdr) +
316 CEIL4(xcRB->request_control_blk_length) +
317 xcRB->request_data_length;
318 if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
319 return -EINVAL;
320 replylen = sizeof(struct type86_fmt2_msg) +
321 CEIL4(xcRB->reply_control_blk_length) +
322 xcRB->reply_data_length;
323 if (replylen > MSGTYPE06_MAX_MSG_SIZE)
324 return -EINVAL;
325
326 /* prepare type6 header */
327 msg->hdr = static_type6_hdrX;
328	memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
329 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
330 if (xcRB->request_data_length) {
331 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
332 msg->hdr.ToCardLen2 = xcRB->request_data_length;
333 }
334 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
335 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
336
337 /* prepare CPRB */
338 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
339 xcRB->request_control_blk_length))
340 return -EFAULT;
341 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
342 xcRB->request_control_blk_length)
343 return -EINVAL;
344 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
345 memcpy(msg->hdr.function_code, function_code,
346 sizeof(msg->hdr.function_code));
347
348 if (memcmp(function_code, "US", 2) == 0)
349 ap_msg->special = 1;
350 else
351 ap_msg->special = 0;
352
353 /* copy data block */
354 if (xcRB->request_data_length &&
355 copy_from_user(req_data, xcRB->request_data_address,
356 xcRB->request_data_length))
357 return -EFAULT;
358 return 0;
359}
360
361/**
362 * Copy results from a type 86 ICA reply message back to user space.
363 *
364 * @zdev: crypto device pointer
365 * @reply: reply AP message.
366 * @outputdata: pointer to user output data
367 * @outputdatalength: size of user output data
368 *
369 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
370 */
371struct type86x_reply {
372 struct type86_hdr hdr;
373 struct type86_fmt2_ext fmt2;
374 struct CPRBX cprbx;
375 unsigned char pad[4]; /* 4 byte function code/rules block ? */
376 unsigned short length;
377 char text[0];
378} __packed;
379
380static int convert_type86_ica(struct zcrypt_device *zdev,
381 struct ap_message *reply,
382 char __user *outputdata,
383 unsigned int outputdatalength)
384{
385 static unsigned char static_pad[] = {
386 0x00, 0x02,
387 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
388 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
389 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
390 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
391 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
392 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
393 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
394 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
395 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
396 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
397 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
398 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
399 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
400 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
401 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
402 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
403 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
404 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
405 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
406 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
407 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
408 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
409 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
410 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
411 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
412 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
413 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
414 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
415 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
416 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
417 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
418 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
419 };
420 struct type86x_reply *msg = reply->message;
421 unsigned short service_rc, service_rs;
422 unsigned int reply_len, pad_len;
423 char *data;
424
425 service_rc = msg->cprbx.ccp_rtcode;
426 if (unlikely(service_rc != 0)) {
427 service_rs = msg->cprbx.ccp_rscode;
428 if (service_rc == 8 && service_rs == 66)
429 return -EINVAL;
430 if (service_rc == 8 && service_rs == 65)
431 return -EINVAL;
432 if (service_rc == 8 && service_rs == 770)
433 return -EINVAL;
434 if (service_rc == 8 && service_rs == 783) {
435 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
436 return -EAGAIN;
437 }
438 if (service_rc == 12 && service_rs == 769)
439 return -EINVAL;
440 if (service_rc == 8 && service_rs == 72)
441 return -EINVAL;
442 zdev->online = 0;
443 return -EAGAIN; /* repeat the request on a different device. */
444 }
445 data = msg->text;
446 reply_len = msg->length - 2;
447 if (reply_len > outputdatalength)
448 return -EINVAL;
449 /*
450 * For all encipher requests, the length of the ciphertext (reply_len)
451 * will always equal the modulus length. For MEX decipher requests
452 * the output needs to get padded. Minimum pad size is 10.
453 *
454 * Currently, the cases where padding will be added is for:
455 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
456 * ZERO-PAD and CRT is only supported for PKD requests)
457 * - PCICC, always
458 */
459 pad_len = outputdatalength - reply_len;
460 if (pad_len > 0) {
461 if (pad_len < 10)
462 return -EINVAL;
463 /* 'restore' padding left in the PCICC/PCIXCC card. */
464 if (copy_to_user(outputdata, static_pad, pad_len - 1))
465 return -EFAULT;
466 if (put_user(0, outputdata + pad_len - 1))
467 return -EFAULT;
468 }
469 /* Copy the crypto response to user space. */
470 if (copy_to_user(outputdata + pad_len, data, reply_len))
471 return -EFAULT;
472 return 0;
473}
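
For MEX decipher replies the card strips the PKCS#1 block-type-2 padding, so the code above rebuilds it: static_pad supplies the 0x00 0x02 marker plus nonzero filler for the first pad_len - 1 bytes, byte pad_len - 1 is the 0x00 separator, and the plaintext follows. A stand-alone sketch of that layout (toy sizes):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char static_pad[] = { 0x00, 0x02, 0x1B, 0x7B, 0x5D,
				       0xB5, 0x75, 0x01, 0x3D, 0xFD };
	unsigned char out[16];			/* outputdatalength */
	unsigned char text[] = "hello";		/* recovered plaintext */
	unsigned int reply_len = 5;
	unsigned int pad_len = sizeof(out) - reply_len;	/* 11, >= 10 */

	memcpy(out, static_pad, pad_len - 1);	/* 0x00 0x02 + filler */
	out[pad_len - 1] = 0x00;		/* separator byte */
	memcpy(out + pad_len, text, reply_len);	/* plaintext */

	printf("%02x %02x ... %02x '%c'\n",
	       out[0], out[1], out[pad_len - 1], out[pad_len]);
	return 0;
}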
474
475/**
476 * Copy results from a type 86 XCRB reply message back to user space.
477 *
478 * @zdev: crypto device pointer
479 * @reply: reply AP message.
480 * @xcRB: pointer to XCRB
481 *
482 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
483 */
484static int convert_type86_xcrb(struct zcrypt_device *zdev,
485 struct ap_message *reply,
486 struct ica_xcRB *xcRB)
487{
488 struct type86_fmt2_msg *msg = reply->message;
489 char *data = reply->message;
490
491 /* Copy CPRB to user */
492 if (copy_to_user(xcRB->reply_control_blk_addr,
493 data + msg->fmt2.offset1, msg->fmt2.count1))
494 return -EFAULT;
495 xcRB->reply_control_blk_length = msg->fmt2.count1;
496
497 /* Copy data buffer to user */
498 if (msg->fmt2.count2)
499 if (copy_to_user(xcRB->reply_data_addr,
500 data + msg->fmt2.offset2, msg->fmt2.count2))
501 return -EFAULT;
502 xcRB->reply_data_length = msg->fmt2.count2;
503 return 0;
504}
505
506static int convert_type86_rng(struct zcrypt_device *zdev,
507 struct ap_message *reply,
508 char *buffer)
509{
510 struct {
511 struct type86_hdr hdr;
512 struct type86_fmt2_ext fmt2;
513 struct CPRBX cprbx;
514 } __packed * msg = reply->message;
515 char *data = reply->message;
516
517 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
518 return -EINVAL;
519 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
520 return msg->fmt2.count2;
521}
522
523static int convert_response_ica(struct zcrypt_device *zdev,
524 struct ap_message *reply,
525 char __user *outputdata,
526 unsigned int outputdatalength)
527{
528 struct type86x_reply *msg = reply->message;
529
530 /* Response type byte is the second byte in the response. */
531 switch (((unsigned char *) reply->message)[1]) {
532 case TYPE82_RSP_CODE:
533 case TYPE88_RSP_CODE:
534 return convert_error(zdev, reply);
535 case TYPE86_RSP_CODE:
536 if (msg->cprbx.ccp_rtcode &&
537 (msg->cprbx.ccp_rscode == 0x14f) &&
538 (outputdatalength > 256)) {
539 if (zdev->max_exp_bit_length <= 17) {
540 zdev->max_exp_bit_length = 17;
541 return -EAGAIN;
542 } else
543 return -EINVAL;
544 }
545 if (msg->hdr.reply_code)
546 return convert_error(zdev, reply);
547 if (msg->cprbx.cprb_ver_id == 0x02)
548 return convert_type86_ica(zdev, reply,
549 outputdata, outputdatalength);
550		/* Fall through, no break; an incorrect CPRB version is an
551		 * unknown response. */
552 default: /* Unknown response type, this should NEVER EVER happen */
553 zdev->online = 0;
554 return -EAGAIN; /* repeat the request on a different device. */
555 }
556}
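
/*
 * Sketch (hypothetical helper): each convert_response_* variant
 * dispatches on the response type, which is always the second byte of
 * an AP reply, and falls through to the offline/-EAGAIN path for any
 * type or CPRB version it does not recognize.
 */
static inline unsigned char ap_reply_type(struct ap_message *reply)
{
	return ((unsigned char *) reply->message)[1];
}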
557
558static int convert_response_xcrb(struct zcrypt_device *zdev,
559 struct ap_message *reply,
560 struct ica_xcRB *xcRB)
561{
562 struct type86x_reply *msg = reply->message;
563
564 /* Response type byte is the second byte in the response. */
565 switch (((unsigned char *) reply->message)[1]) {
566 case TYPE82_RSP_CODE:
567 case TYPE88_RSP_CODE:
568 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
569 return convert_error(zdev, reply);
570 case TYPE86_RSP_CODE:
571 if (msg->hdr.reply_code) {
572 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
573 return convert_error(zdev, reply);
574 }
575 if (msg->cprbx.cprb_ver_id == 0x02)
576 return convert_type86_xcrb(zdev, reply, xcRB);
577		/* Fall through, no break; an incorrect CPRB version is an
578		 * unknown response. */
579 default: /* Unknown response type, this should NEVER EVER happen */
580 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
581 zdev->online = 0;
582 return -EAGAIN; /* repeat the request on a different device. */
583 }
584}
585
586static int convert_response_rng(struct zcrypt_device *zdev,
587 struct ap_message *reply,
588 char *data)
589{
590 struct type86x_reply *msg = reply->message;
591
592 switch (msg->hdr.type) {
593 case TYPE82_RSP_CODE:
594 case TYPE88_RSP_CODE:
595 return -EINVAL;
596 case TYPE86_RSP_CODE:
597 if (msg->hdr.reply_code)
598 return -EINVAL;
599 if (msg->cprbx.cprb_ver_id == 0x02)
600 return convert_type86_rng(zdev, reply, data);
601		/* Fall through, no break; an incorrect CPRB version is an
602		 * unknown response. */
603 default: /* Unknown response type, this should NEVER EVER happen */
604 zdev->online = 0;
605 return -EAGAIN; /* repeat the request on a different device. */
606 }
607}
608
609/**
610 * This function is called from the AP bus code after a crypto request
611 * "msg" has finished with the reply message "reply".
612 * It is called from tasklet context.
613 * @ap_dev: pointer to the AP device
614 * @msg: pointer to the AP message
615 * @reply: pointer to the AP reply message
616 */
617static void zcrypt_msgtype6_receive(struct ap_device *ap_dev,
618 struct ap_message *msg,
619 struct ap_message *reply)
620{
621 static struct error_hdr error_reply = {
622 .type = TYPE82_RSP_CODE,
623 .reply_code = REP82_ERROR_MACHINE_FAILURE,
624 };
625 struct response_type *resp_type =
626 (struct response_type *) msg->private;
627 struct type86x_reply *t86r;
628 int length;
629
630 /* Copy the reply message to the request message buffer. */
631 if (IS_ERR(reply)) {
632 memcpy(msg->message, &error_reply, sizeof(error_reply));
633 goto out;
634 }
635 t86r = reply->message;
636 if (t86r->hdr.type == TYPE86_RSP_CODE &&
637 t86r->cprbx.cprb_ver_id == 0x02) {
638 switch (resp_type->type) {
639 case PCIXCC_RESPONSE_TYPE_ICA:
640 length = sizeof(struct type86x_reply)
641 + t86r->length - 2;
642 length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
643 memcpy(msg->message, reply->message, length);
644 break;
645 case PCIXCC_RESPONSE_TYPE_XCRB:
646 length = t86r->fmt2.offset2 + t86r->fmt2.count2;
647 length = min(MSGTYPE06_MAX_MSG_SIZE, length);
648 memcpy(msg->message, reply->message, length);
649 break;
650 default:
651 memcpy(msg->message, &error_reply,
652 sizeof(error_reply));
653 }
654 } else
655 memcpy(msg->message, reply->message, sizeof(error_reply));
656out:
657 complete(&(resp_type->work));
658}
659
660static atomic_t zcrypt_step = ATOMIC_INIT(0);
661
662/**
663 * The request distributor calls this function if it picked the PCIXCC/CEX2C
664 * device to handle a modexpo request.
665 * @zdev: pointer to zcrypt_device structure that identifies the
666 * PCIXCC/CEX2C device to the request distributor
667 * @mex: pointer to the modexpo request buffer
668 */
669static long zcrypt_msgtype6_modexpo(struct zcrypt_device *zdev,
670 struct ica_rsa_modexpo *mex)
671{
672 struct ap_message ap_msg;
673 struct response_type resp_type = {
674 .type = PCIXCC_RESPONSE_TYPE_ICA,
675 };
676 int rc;
677
678 ap_init_message(&ap_msg);
679 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
680 if (!ap_msg.message)
681 return -ENOMEM;
682 ap_msg.receive = zcrypt_msgtype6_receive;
683 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
684 atomic_inc_return(&zcrypt_step);
685 ap_msg.private = &resp_type;
686 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
687 if (rc)
688 goto out_free;
689 init_completion(&resp_type.work);
690 ap_queue_message(zdev->ap_dev, &ap_msg);
691 rc = wait_for_completion_interruptible(&resp_type.work);
692 if (rc == 0)
693 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
694 mex->outputdatalength);
695 else
696 /* Signal pending. */
697 ap_cancel_message(zdev->ap_dev, &ap_msg);
698out_free:
699 free_page((unsigned long) ap_msg.message);
700 return rc;
701}
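
/*
 * Sketch (hypothetical helper): the program-supplied message id built
 * above packs the caller's pid into the high 32 bits and a global
 * sequence number into the low 32 bits, so concurrent requests from
 * the same process stay distinguishable on the AP queue.
 */
static inline unsigned long long zcrypt_msgtype6_psmid(void)
{
	return (((unsigned long long) current->pid) << 32) +
		atomic_inc_return(&zcrypt_step);
}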
702
703/**
704 * The request distributor calls this function if it picked the PCIXCC/CEX2C
705 * device to handle a modexpo_crt request.
706 * @zdev: pointer to zcrypt_device structure that identifies the
707 * PCIXCC/CEX2C device to the request distributor
708 * @crt: pointer to the modexpo_crt request buffer
709 */
710static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_device *zdev,
711 struct ica_rsa_modexpo_crt *crt)
712{
713 struct ap_message ap_msg;
714 struct response_type resp_type = {
715 .type = PCIXCC_RESPONSE_TYPE_ICA,
716 };
717 int rc;
718
719 ap_init_message(&ap_msg);
720 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
721 if (!ap_msg.message)
722 return -ENOMEM;
723 ap_msg.receive = zcrypt_msgtype6_receive;
724 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
725 atomic_inc_return(&zcrypt_step);
726 ap_msg.private = &resp_type;
727 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
728 if (rc)
729 goto out_free;
730 init_completion(&resp_type.work);
731 ap_queue_message(zdev->ap_dev, &ap_msg);
732 rc = wait_for_completion_interruptible(&resp_type.work);
733 if (rc == 0)
734 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
735 crt->outputdatalength);
736 else
737 /* Signal pending. */
738 ap_cancel_message(zdev->ap_dev, &ap_msg);
739out_free:
740 free_page((unsigned long) ap_msg.message);
741 return rc;
742}
743
744/**
745 * The request distributor calls this function if it picked the PCIXCC/CEX2C
746 * device to handle a send_cprb request.
747 * @zdev: pointer to zcrypt_device structure that identifies the
748 * PCIXCC/CEX2C device to the request distributor
749 * @xcRB: pointer to the send_cprb request buffer
750 */
751static long zcrypt_msgtype6_send_cprb(struct zcrypt_device *zdev,
752 struct ica_xcRB *xcRB)
753{
754 struct ap_message ap_msg;
755 struct response_type resp_type = {
756 .type = PCIXCC_RESPONSE_TYPE_XCRB,
757 };
758 int rc;
759
760 ap_init_message(&ap_msg);
761 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
762 if (!ap_msg.message)
763 return -ENOMEM;
764 ap_msg.receive = zcrypt_msgtype6_receive;
765 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
766 atomic_inc_return(&zcrypt_step);
767 ap_msg.private = &resp_type;
768 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
769 if (rc)
770 goto out_free;
771 init_completion(&resp_type.work);
772 ap_queue_message(zdev->ap_dev, &ap_msg);
773 rc = wait_for_completion_interruptible(&resp_type.work);
774 if (rc == 0)
775 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
776 else
777 /* Signal pending. */
778 ap_cancel_message(zdev->ap_dev, &ap_msg);
779out_free:
780 kzfree(ap_msg.message);
781 return rc;
782}
783
784/**
785 * The request distributor calls this function if it picked the PCIXCC/CEX2C
786 * device to generate random data.
787 * @zdev: pointer to zcrypt_device structure that identifies the
788 * PCIXCC/CEX2C device to the request distributor
789 * @buffer: pointer to a memory page to return random data
790 */
791
792static long zcrypt_msgtype6_rng(struct zcrypt_device *zdev,
793 char *buffer)
794{
795 struct ap_message ap_msg;
796 struct response_type resp_type = {
797 .type = PCIXCC_RESPONSE_TYPE_XCRB,
798 };
799 int rc;
800
801 ap_init_message(&ap_msg);
802 ap_msg.message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
803 if (!ap_msg.message)
804 return -ENOMEM;
805 ap_msg.receive = zcrypt_msgtype6_receive;
806 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
807 atomic_inc_return(&zcrypt_step);
808 ap_msg.private = &resp_type;
809 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
810 init_completion(&resp_type.work);
811 ap_queue_message(zdev->ap_dev, &ap_msg);
812 rc = wait_for_completion_interruptible(&resp_type.work);
813 if (rc == 0)
814 rc = convert_response_rng(zdev, &ap_msg, buffer);
815 else
816 /* Signal pending. */
817 ap_cancel_message(zdev->ap_dev, &ap_msg);
818 kfree(ap_msg.message);
819 return rc;
820}
821
822/**
823 * The crypto operations for a PCIXCC/CEX2C card.
824 */
825static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
826 .owner = THIS_MODULE,
827 .variant = MSGTYPE06_VARIANT_NORNG,
828 .rsa_modexpo = zcrypt_msgtype6_modexpo,
829 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
830 .send_cprb = zcrypt_msgtype6_send_cprb,
831};
832
833static struct zcrypt_ops zcrypt_msgtype6_ops = {
834 .owner = THIS_MODULE,
835 .variant = MSGTYPE06_VARIANT_DEFAULT,
836 .rsa_modexpo = zcrypt_msgtype6_modexpo,
837 .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
838 .send_cprb = zcrypt_msgtype6_send_cprb,
839 .rng = zcrypt_msgtype6_rng,
840};
841
842int __init zcrypt_msgtype6_init(void)
843{
844 zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
845 zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
846 return 0;
847}
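
/*
 * Usage sketch: a card driver binds to one of the two variants
 * registered above through the msgtype registry, e.g. (rng_supported
 * is a placeholder; see the zcrypt_pcixcc.c hunk further down):
 *
 *	zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME,
 *			rng_supported ? MSGTYPE06_VARIANT_DEFAULT
 *				      : MSGTYPE06_VARIANT_NORNG);
 */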
848
849void __exit zcrypt_msgtype6_exit(void)
850{
851 zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
852 zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
853}
854
855module_init(zcrypt_msgtype6_init);
856module_exit(zcrypt_msgtype6_exit);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
deleted file mode 100644
index 1e500d3c073..00000000000
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ /dev/null
@@ -1,169 +0,0 @@
1/*
2 * zcrypt 2.1.0
3 *
4 * Copyright IBM Corp. 2001, 2012
5 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com)
7 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#ifndef _ZCRYPT_MSGTYPE6_H_
28#define _ZCRYPT_MSGTYPE6_H_
29
30#include <asm/zcrypt.h>
31
32#define MSGTYPE06_NAME "zcrypt_msgtype6"
33#define MSGTYPE06_VARIANT_DEFAULT 0
34#define MSGTYPE06_VARIANT_NORNG 1
35
36#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
37
38/**
39 * The type 6 message family is associated with PCICC or PCIXCC cards.
40 *
41 * It contains a message header followed by a CPRB, both of which
42 * are described below.
43 *
44 * Note that all reserved fields must be zeroes.
45 */
46struct type6_hdr {
47 unsigned char reserved1; /* 0x00 */
48 unsigned char type; /* 0x06 */
49 unsigned char reserved2[2]; /* 0x0000 */
50 unsigned char right[4]; /* 0x00000000 */
51 unsigned char reserved3[2]; /* 0x0000 */
52 unsigned char reserved4[2]; /* 0x0000 */
53 unsigned char apfs[4]; /* 0x00000000 */
54 unsigned int offset1; /* 0x00000058 (offset to CPRB) */
55 unsigned int offset2; /* 0x00000000 */
56 unsigned int offset3; /* 0x00000000 */
57 unsigned int offset4; /* 0x00000000 */
58 unsigned char agent_id[16]; /* PCICC: */
59 /* 0x0100 */
60 /* 0x4343412d4150504c202020 */
61 /* 0x010101 */
62 /* PCIXCC: */
63 /* 0x4341000000000000 */
64 /* 0x0000000000000000 */
65 unsigned char rqid[2]; /* rqid. internal to 603 */
66 unsigned char reserved5[2]; /* 0x0000 */
67 unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
68 unsigned char reserved6[2]; /* 0x0000 */
69 unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
70 unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
71 unsigned int ToCardLen3; /* 0x00000000 */
72 unsigned int ToCardLen4; /* 0x00000000 */
73 unsigned int FromCardLen1; /* response buffer length */
74 unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
75 unsigned int FromCardLen3; /* 0x00000000 */
76 unsigned int FromCardLen4; /* 0x00000000 */
77} __packed;
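
/*
 * Sketch (illustrative only): ToCardLen1 rounds the request CPRB
 * length up to the next multiple of 4, i.e. (len + 3) & -4, so a
 * 225-byte CPRB is sent as 228 bytes while a 220-byte one stays 220.
 */
static inline unsigned int type6_ceil4(unsigned int len)
{
	return (len + 3) & ~3U;		/* identical to (len + 3) & -4 */
}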
78
79/**
80 * The type 86 message family is associated with PCICC and PCIXCC cards.
81 *
82 * It contains a message header followed by a CPRB. The CPRB is
83 * the same as the request CPRB, which is described above.
84 *
85 * If format is 1, an error condition exists and no data beyond
86 * the 8-byte message header is of interest.
87 *
88 * The non-error message is shown below.
89 *
90 * Note that all reserved fields must be zeroes.
91 */
92struct type86_hdr {
93 unsigned char reserved1; /* 0x00 */
94 unsigned char type; /* 0x86 */
95 unsigned char format; /* 0x01 (error) or 0x02 (ok) */
96 unsigned char reserved2; /* 0x00 */
97 unsigned char reply_code; /* reply code (see above) */
98 unsigned char reserved3[3]; /* 0x000000 */
99} __packed;
100
101#define TYPE86_RSP_CODE 0x86
102#define TYPE86_FMT2 0x02
103
104struct type86_fmt2_ext {
105 unsigned char reserved[4]; /* 0x00000000 */
106 unsigned char apfs[4]; /* final status */
107 unsigned int count1; /* length of CPRB + parameters */
108 unsigned int offset1; /* offset to CPRB */
109 unsigned int count2; /* 0x00000000 */
110 unsigned int offset2; /* db offset 0x00000000 for PKD */
111 unsigned int count3; /* 0x00000000 */
112 unsigned int offset3; /* 0x00000000 */
113 unsigned int count4; /* 0x00000000 */
114 unsigned int offset4; /* 0x00000000 */
115} __packed;
116
117/**
118 * Prepare a type6 CPRB message for random number generation
119 *
120 * @ap_dev: AP device pointer
121 * @ap_msg: pointer to AP message
122 */
123static inline void rng_type6CPRB_msgX(struct ap_device *ap_dev,
124 struct ap_message *ap_msg,
125 unsigned random_number_length)
126{
127 struct {
128 struct type6_hdr hdr;
129 struct CPRBX cprbx;
130 char function_code[2];
131 short int rule_length;
132 char rule[8];
133 short int verb_length;
134 short int key_length;
135 } __packed * msg = ap_msg->message;
136 static struct type6_hdr static_type6_hdrX = {
137 .type = 0x06,
138 .offset1 = 0x00000058,
139 .agent_id = {'C', 'A'},
140 .function_code = {'R', 'L'},
141 .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
142 .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
143 };
144 static struct CPRBX local_cprbx = {
145 .cprb_len = 0x00dc,
146 .cprb_ver_id = 0x02,
147 .func_id = {0x54, 0x32},
148 .req_parml = sizeof(*msg) - sizeof(msg->hdr) -
149 sizeof(msg->cprbx),
150 .rpl_msgbl = sizeof(*msg) - sizeof(msg->hdr),
151 };
152
153 msg->hdr = static_type6_hdrX;
154 msg->hdr.FromCardLen2 = random_number_length,
155 msg->cprbx = local_cprbx;
156 msg->cprbx.rpl_datal = random_number_length,
157 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
158 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
159 msg->rule_length = 0x0a;
160 memcpy(msg->rule, "RANDOM ", 8);
161 msg->verb_length = 0x02;
162 msg->key_length = 0x02;
163 ap_msg->length = sizeof(*msg);
164}
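
/*
 * Illustrative byte-level reading of the parm block built above,
 * which follows the CPRBX on the wire:
 *
 *	'R' 'L'		function code
 *	0x000a		rule block length (2 length bytes + 8 rule bytes)
 *	"RANDOM  "	rule array
 *	0x0002		verb data block, length field only (empty)
 *	0x0002		key data block, length field only (empty)
 */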
165
166int zcrypt_msgtype6_init(void);
167void zcrypt_msgtype6_exit(void);
168
169#endif /* _ZCRYPT_MSGTYPE6_H_ */
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index f2b71d8df01..d84816f144d 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.c
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2006 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
@@ -51,11 +53,13 @@ static struct ap_device_id zcrypt_pcica_ids[] = {
51 { /* end of list */ }, 53 { /* end of list */ },
52}; 54};
53 55
56#ifndef CONFIG_ZCRYPT_MONOLITHIC
54MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids); 57MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
55MODULE_AUTHOR("IBM Corporation"); 58MODULE_AUTHOR("IBM Corporation");
56MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, " 59MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
57 "Copyright IBM Corp. 2001, 2006"); 60 "Copyright 2001, 2006 IBM Corporation");
58MODULE_LICENSE("GPL"); 61MODULE_LICENSE("GPL");
62#endif
59 63
60static int zcrypt_pcica_probe(struct ap_device *ap_dev); 64static int zcrypt_pcica_probe(struct ap_device *ap_dev);
61static void zcrypt_pcica_remove(struct ap_device *ap_dev); 65static void zcrypt_pcica_remove(struct ap_device *ap_dev);
@@ -65,6 +69,7 @@ static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *,
65static struct ap_driver zcrypt_pcica_driver = { 69static struct ap_driver zcrypt_pcica_driver = {
66 .probe = zcrypt_pcica_probe, 70 .probe = zcrypt_pcica_probe,
67 .remove = zcrypt_pcica_remove, 71 .remove = zcrypt_pcica_remove,
72 .receive = zcrypt_pcica_receive,
68 .ids = zcrypt_pcica_ids, 73 .ids = zcrypt_pcica_ids,
69 .request_timeout = PCICA_CLEANUP_TIME, 74 .request_timeout = PCICA_CLEANUP_TIME,
70}; 75};
@@ -281,7 +286,6 @@ static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev,
281 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 286 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
282 if (!ap_msg.message) 287 if (!ap_msg.message)
283 return -ENOMEM; 288 return -ENOMEM;
284 ap_msg.receive = zcrypt_pcica_receive;
285 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 289 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
286 atomic_inc_return(&zcrypt_step); 290 atomic_inc_return(&zcrypt_step);
287 ap_msg.private = &work; 291 ap_msg.private = &work;
@@ -320,7 +324,6 @@ static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev,
320 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); 324 ap_msg.message = kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL);
321 if (!ap_msg.message) 325 if (!ap_msg.message)
322 return -ENOMEM; 326 return -ENOMEM;
323 ap_msg.receive = zcrypt_pcica_receive;
324 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 327 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
325 atomic_inc_return(&zcrypt_step); 328 atomic_inc_return(&zcrypt_step);
326 ap_msg.private = &work; 329 ap_msg.private = &work;
@@ -405,5 +408,7 @@ void zcrypt_pcica_exit(void)
405 ap_driver_unregister(&zcrypt_pcica_driver); 408 ap_driver_unregister(&zcrypt_pcica_driver);
406} 409}
407 410
411#ifndef CONFIG_ZCRYPT_MONOLITHIC
408module_init(zcrypt_pcica_init); 412module_init(zcrypt_pcica_init);
409module_exit(zcrypt_pcica_exit); 413module_exit(zcrypt_pcica_exit);
414#endif
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
index 9a59155cad5..3be11187f6d 100644
--- a/drivers/s390/crypto/zcrypt_pcica.h
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcica.h
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2006 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index 0d90a433405..bdbdbe19299 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.c
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2006 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
@@ -63,11 +65,13 @@ static struct ap_device_id zcrypt_pcicc_ids[] = {
63 { /* end of list */ }, 65 { /* end of list */ },
64}; 66};
65 67
68#ifndef CONFIG_ZCRYPT_MONOLITHIC
66MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids); 69MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
67MODULE_AUTHOR("IBM Corporation"); 70MODULE_AUTHOR("IBM Corporation");
68MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, " 71MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
69 "Copyright IBM Corp. 2001, 2006"); 72 "Copyright 2001, 2006 IBM Corporation");
70MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74#endif
71 75
72static int zcrypt_pcicc_probe(struct ap_device *ap_dev); 76static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
73static void zcrypt_pcicc_remove(struct ap_device *ap_dev); 77static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
@@ -77,6 +81,7 @@ static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
77static struct ap_driver zcrypt_pcicc_driver = { 81static struct ap_driver zcrypt_pcicc_driver = {
78 .probe = zcrypt_pcicc_probe, 82 .probe = zcrypt_pcicc_probe,
79 .remove = zcrypt_pcicc_remove, 83 .remove = zcrypt_pcicc_remove,
84 .receive = zcrypt_pcicc_receive,
80 .ids = zcrypt_pcicc_ids, 85 .ids = zcrypt_pcicc_ids,
81 .request_timeout = PCICC_CLEANUP_TIME, 86 .request_timeout = PCICC_CLEANUP_TIME,
82}; 87};
@@ -485,7 +490,6 @@ static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev,
485 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 490 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
486 if (!ap_msg.message) 491 if (!ap_msg.message)
487 return -ENOMEM; 492 return -ENOMEM;
488 ap_msg.receive = zcrypt_pcicc_receive;
489 ap_msg.length = PAGE_SIZE; 493 ap_msg.length = PAGE_SIZE;
490 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 494 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
491 atomic_inc_return(&zcrypt_step); 495 atomic_inc_return(&zcrypt_step);
@@ -525,7 +529,6 @@ static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
525 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); 529 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
526 if (!ap_msg.message) 530 if (!ap_msg.message)
527 return -ENOMEM; 531 return -ENOMEM;
528 ap_msg.receive = zcrypt_pcicc_receive;
529 ap_msg.length = PAGE_SIZE; 532 ap_msg.length = PAGE_SIZE;
530 ap_msg.psmid = (((unsigned long long) current->pid) << 32) + 533 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
531 atomic_inc_return(&zcrypt_step); 534 atomic_inc_return(&zcrypt_step);
@@ -611,5 +614,7 @@ void zcrypt_pcicc_exit(void)
611 ap_driver_unregister(&zcrypt_pcicc_driver); 614 ap_driver_unregister(&zcrypt_pcicc_driver);
612} 615}
613 616
617#ifndef CONFIG_ZCRYPT_MONOLITHIC
614module_init(zcrypt_pcicc_init); 618module_init(zcrypt_pcicc_init);
615module_exit(zcrypt_pcicc_exit); 619module_exit(zcrypt_pcicc_exit);
620#endif
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
index 7fe27e15075..6d4454846c8 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.h
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -1,7 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcicc.h
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2006 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 899ffa19f5e..dd4737808e0 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,14 +1,15 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.c
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2012 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 * Ralph Wuerthner <rwuerthn@de.ibm.com>
11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 15 * it under the terms of the GNU General Public License as published by
@@ -36,7 +37,7 @@
36#include "ap_bus.h" 37#include "ap_bus.h"
37#include "zcrypt_api.h" 38#include "zcrypt_api.h"
38#include "zcrypt_error.h" 39#include "zcrypt_error.h"
39#include "zcrypt_msgtype6.h" 40#include "zcrypt_pcicc.h"
40#include "zcrypt_pcixcc.h" 41#include "zcrypt_pcixcc.h"
41#include "zcrypt_cca_key.h" 42#include "zcrypt_cca_key.h"
42 43
@@ -55,6 +56,11 @@
55#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */ 56#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
56 57
57#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024) 58#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
59#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
60#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
61#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
62
63#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
58 64
59#define PCIXCC_CLEANUP_TIME (15*HZ) 65#define PCIXCC_CLEANUP_TIME (15*HZ)
60 66
@@ -74,23 +80,791 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
74 { /* end of list */ }, 80 { /* end of list */ },
75}; 81};
76 82
83#ifndef CONFIG_ZCRYPT_MONOLITHIC
77MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids); 84MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
78MODULE_AUTHOR("IBM Corporation"); 85MODULE_AUTHOR("IBM Corporation");
79MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, " \ 86MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
80 "Copyright IBM Corp. 2001, 2012"); 87 "Copyright 2001, 2006 IBM Corporation");
81MODULE_LICENSE("GPL"); 88MODULE_LICENSE("GPL");
89#endif
82 90
83static int zcrypt_pcixcc_probe(struct ap_device *ap_dev); 91static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
84static void zcrypt_pcixcc_remove(struct ap_device *ap_dev); 92static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
93static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
94 struct ap_message *);
85 95
86static struct ap_driver zcrypt_pcixcc_driver = { 96static struct ap_driver zcrypt_pcixcc_driver = {
87 .probe = zcrypt_pcixcc_probe, 97 .probe = zcrypt_pcixcc_probe,
88 .remove = zcrypt_pcixcc_remove, 98 .remove = zcrypt_pcixcc_remove,
99 .receive = zcrypt_pcixcc_receive,
89 .ids = zcrypt_pcixcc_ids, 100 .ids = zcrypt_pcixcc_ids,
90 .request_timeout = PCIXCC_CLEANUP_TIME, 101 .request_timeout = PCIXCC_CLEANUP_TIME,
91}; 102};
92 103
93/** 104/**
105 * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
106 * card in a type6 message. The 3 fields that must be filled in at execution
107 * time are req_parml, rpl_parml and usage_domain.
108 * Everything about this interface is ascii/big-endian, since the
109 * device does *not* have 'Intel inside'.
110 *
111 * The CPRBX is followed immediately by the parm block.
112 * The parm block contains:
113 * - function code ('PD' 0x5044 or 'PK' 0x504B)
114 * - rule block (one of:)
115 * + 0x000A 'PKCS-1.2' (MCL2 'PD')
116 * + 0x000A 'ZERO-PAD' (MCL2 'PK')
117 * + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
118 * + 0x000A 'MRP ' (MCL3 'PK' or CEX2C 'PK')
119 * - VUD block
120 */
121static struct CPRBX static_cprbx = {
122 .cprb_len = 0x00DC,
123 .cprb_ver_id = 0x02,
124 .func_id = {0x54,0x32},
125};
126
127/**
128 * Convert an ICAMEX message to a type6 MEX message.
129 *
130 * @zdev: crypto device pointer
131 * @ap_msg: pointer to AP message
132 * @mex: pointer to user input data
133 *
134 * Returns 0 on success or -EFAULT.
135 */
136static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
137 struct ap_message *ap_msg,
138 struct ica_rsa_modexpo *mex)
139{
140 static struct type6_hdr static_type6_hdrX = {
141 .type = 0x06,
142 .offset1 = 0x00000058,
143 .agent_id = {'C','A',},
144 .function_code = {'P','K'},
145 };
146 static struct function_and_rules_block static_pke_fnr = {
147 .function_code = {'P','K'},
148 .ulen = 10,
149 .only_rule = {'M','R','P',' ',' ',' ',' ',' '}
150 };
151 static struct function_and_rules_block static_pke_fnr_MCL2 = {
152 .function_code = {'P','K'},
153 .ulen = 10,
154 .only_rule = {'Z','E','R','O','-','P','A','D'}
155 };
156 struct {
157 struct type6_hdr hdr;
158 struct CPRBX cprbx;
159 struct function_and_rules_block fr;
160 unsigned short length;
161 char text[0];
162 } __attribute__((packed)) *msg = ap_msg->message;
163 int size;
164
165 /* VUD.ciphertext */
166 msg->length = mex->inputdatalength + 2;
167 if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
168 return -EFAULT;
169
170 /* Set up key which is located after the variable length text. */
171 size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
172 if (size < 0)
173 return size;
174 size += sizeof(*msg) + mex->inputdatalength;
175
176 /* message header, cprbx and f&r */
177 msg->hdr = static_type6_hdrX;
178 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
179 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
180
181 msg->cprbx = static_cprbx;
182 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
183 msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
184
185 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
186 static_pke_fnr_MCL2 : static_pke_fnr;
187
188 msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
189
190 ap_msg->length = size;
191 return 0;
192}
193
194/**
195 * Convert an ICACRT message to a type6 CRT message.
196 *
197 * @zdev: crypto device pointer
198 * @ap_msg: pointer to AP message
199 * @crt: pointer to user input data
200 *
201 * Returns 0 on success or -EFAULT.
202 */
203static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
204 struct ap_message *ap_msg,
205 struct ica_rsa_modexpo_crt *crt)
206{
207 static struct type6_hdr static_type6_hdrX = {
208 .type = 0x06,
209 .offset1 = 0x00000058,
210 .agent_id = {'C','A',},
211 .function_code = {'P','D'},
212 };
213 static struct function_and_rules_block static_pkd_fnr = {
214 .function_code = {'P','D'},
215 .ulen = 10,
216 .only_rule = {'Z','E','R','O','-','P','A','D'}
217 };
218
219 static struct function_and_rules_block static_pkd_fnr_MCL2 = {
220 .function_code = {'P','D'},
221 .ulen = 10,
222 .only_rule = {'P','K','C','S','-','1','.','2'}
223 };
224 struct {
225 struct type6_hdr hdr;
226 struct CPRBX cprbx;
227 struct function_and_rules_block fr;
228 unsigned short length;
229 char text[0];
230 } __attribute__((packed)) *msg = ap_msg->message;
231 int size;
232
233 /* VUD.ciphertext */
234 msg->length = crt->inputdatalength + 2;
235 if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
236 return -EFAULT;
237
238 /* Set up key which is located after the variable length text. */
239 size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1);
240 if (size < 0)
241 return size;
242 size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */
243
244 /* message header, cprbx and f&r */
245 msg->hdr = static_type6_hdrX;
246 msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
247 msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
248
249 msg->cprbx = static_cprbx;
250 msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
251 msg->cprbx.req_parml = msg->cprbx.rpl_msgbl =
252 size - sizeof(msg->hdr) - sizeof(msg->cprbx);
253
254 msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
255 static_pkd_fnr_MCL2 : static_pkd_fnr;
256
257 ap_msg->length = size;
258 return 0;
259}
260
261/**
262 * Convert an XCRB message to a type6 CPRB message.
263 *
264 * @zdev: crypto device pointer
265 * @ap_msg: pointer to AP message
266 * @xcRB: pointer to user input data
267 *
268 * Returns 0 on success or -EFAULT.
269 */
270struct type86_fmt2_msg {
271 struct type86_hdr hdr;
272 struct type86_fmt2_ext fmt2;
273} __attribute__((packed));
274
275static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
276 struct ap_message *ap_msg,
277 struct ica_xcRB *xcRB)
278{
279 static struct type6_hdr static_type6_hdrX = {
280 .type = 0x06,
281 .offset1 = 0x00000058,
282 };
283 struct {
284 struct type6_hdr hdr;
285 struct CPRBX cprbx;
286 } __attribute__((packed)) *msg = ap_msg->message;
287
288 int rcblen = CEIL4(xcRB->request_control_blk_length);
289 int replylen;
290 char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
291 char *function_code;
292
293 /* length checks */
294 ap_msg->length = sizeof(struct type6_hdr) +
295 CEIL4(xcRB->request_control_blk_length) +
296 xcRB->request_data_length;
297 if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
298 return -EFAULT;
299 if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
300 return -EFAULT;
301 if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
302 return -EFAULT;
303 replylen = CEIL4(xcRB->reply_control_blk_length) +
304 CEIL4(xcRB->reply_data_length) +
305 sizeof(struct type86_fmt2_msg);
306 if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
307 xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
308 (sizeof(struct type86_fmt2_msg) +
309 CEIL4(xcRB->reply_data_length));
310 }
311
312 /* prepare type6 header */
313 msg->hdr = static_type6_hdrX;
314 memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
315 msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
316 if (xcRB->request_data_length) {
317 msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
318 msg->hdr.ToCardLen2 = xcRB->request_data_length;
319 }
320 msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
321 msg->hdr.FromCardLen2 = xcRB->reply_data_length;
322
323 /* prepare CPRB */
324 if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
325 xcRB->request_control_blk_length))
326 return -EFAULT;
327 if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
328 xcRB->request_control_blk_length)
329 return -EFAULT;
330 function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
331 memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
332
333 if (memcmp(function_code, "US", 2) == 0)
334 ap_msg->special = 1;
335 else
336 ap_msg->special = 0;
337
338 /* copy data block */
339 if (xcRB->request_data_length &&
340 copy_from_user(req_data, xcRB->request_data_address,
341 xcRB->request_data_length))
342 return -EFAULT;
343 return 0;
344}
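
/*
 * Worked reading of the length budget above (illustrative): the reply
 * must fit a type86 fmt2 header plus both 4-byte-rounded blocks,
 *
 *	replylen = sizeof(struct type86_fmt2_msg)
 *		 + CEIL4(reply_control_blk_length)
 *		 + CEIL4(reply_data_length),
 *
 * and when that exceeds PCIXCC_MAX_XCRB_RESPONSE_SIZE the control
 * block length is what gets shrunk so the total fits.
 */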
345
346/**
347 * Prepare a type6 CPRB message for random number generation
348 *
349 * @ap_dev: AP device pointer
350 * @ap_msg: pointer to AP message
351 */
352static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
353 struct ap_message *ap_msg,
354 unsigned random_number_length)
355{
356 struct {
357 struct type6_hdr hdr;
358 struct CPRBX cprbx;
359 char function_code[2];
360 short int rule_length;
361 char rule[8];
362 short int verb_length;
363 short int key_length;
364 } __attribute__((packed)) *msg = ap_msg->message;
365 static struct type6_hdr static_type6_hdrX = {
366 .type = 0x06,
367 .offset1 = 0x00000058,
368 .agent_id = {'C', 'A'},
369 .function_code = {'R', 'L'},
370 .ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
371 .FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
372 };
373 static struct CPRBX local_cprbx = {
374 .cprb_len = 0x00dc,
375 .cprb_ver_id = 0x02,
376 .func_id = {0x54, 0x32},
377 .req_parml = sizeof *msg - sizeof(msg->hdr) -
378 sizeof(msg->cprbx),
379 .rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
380 };
381
382 msg->hdr = static_type6_hdrX;
383 msg->hdr.FromCardLen2 = random_number_length,
384 msg->cprbx = local_cprbx;
385 msg->cprbx.rpl_datal = random_number_length,
386 msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
387 memcpy(msg->function_code, msg->hdr.function_code, 0x02);
388 msg->rule_length = 0x0a;
389 memcpy(msg->rule, "RANDOM ", 8);
390 msg->verb_length = 0x02;
391 msg->key_length = 0x02;
392 ap_msg->length = sizeof *msg;
393}
394
395/**
396 * Copy results from a type 86 ICA reply message back to user space.
397 *
398 * @zdev: crypto device pointer
399 * @reply: reply AP message.
400 * @data: pointer to user output data
401 * @length: size of user output data
402 *
403 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
404 */
405struct type86x_reply {
406 struct type86_hdr hdr;
407 struct type86_fmt2_ext fmt2;
408 struct CPRBX cprbx;
409 unsigned char pad[4]; /* 4 byte function code/rules block ? */
410 unsigned short length;
411 char text[0];
412} __attribute__((packed));
413
414static int convert_type86_ica(struct zcrypt_device *zdev,
415 struct ap_message *reply,
416 char __user *outputdata,
417 unsigned int outputdatalength)
418{
419 static unsigned char static_pad[] = {
420 0x00,0x02,
421 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,
422 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57,
423 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,
424 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39,
425 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,
426 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D,
427 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,
428 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F,
429 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,
430 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45,
431 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,
432 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F,
433 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,
434 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D,
435 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,
436 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9,
437 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,
438 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B,
439 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,
440 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD,
441 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,
442 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1,
443 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,
444 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23,
445 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,
446 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43,
447 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,
448 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F,
449 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,
450 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD,
451 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,
452 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09
453 };
454 struct type86x_reply *msg = reply->message;
455 unsigned short service_rc, service_rs;
456 unsigned int reply_len, pad_len;
457 char *data;
458
459 service_rc = msg->cprbx.ccp_rtcode;
460 if (unlikely(service_rc != 0)) {
461 service_rs = msg->cprbx.ccp_rscode;
462 if (service_rc == 8 && service_rs == 66)
463 return -EINVAL;
464 if (service_rc == 8 && service_rs == 65)
465 return -EINVAL;
466 if (service_rc == 8 && service_rs == 770)
467 return -EINVAL;
468 if (service_rc == 8 && service_rs == 783) {
469 zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD;
470 return -EAGAIN;
471 }
472 if (service_rc == 12 && service_rs == 769)
473 return -EINVAL;
474 if (service_rc == 8 && service_rs == 72)
475 return -EINVAL;
476 zdev->online = 0;
477 return -EAGAIN; /* repeat the request on a different device. */
478 }
479 data = msg->text;
480 reply_len = msg->length - 2;
481 if (reply_len > outputdatalength)
482 return -EINVAL;
483 /*
484 * For all encipher requests, the length of the ciphertext (reply_len)
485 * will always equal the modulus length. For MEX decipher requests
486 * the output needs to get padded. Minimum pad size is 10.
487 *
488 * Currently, the cases where padding will be added are:
489 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
490 * ZERO-PAD and CRT is only supported for PKD requests)
491 * - PCICC, always
492 */
493 pad_len = outputdatalength - reply_len;
494 if (pad_len > 0) {
495 if (pad_len < 10)
496 return -EINVAL;
497 /* 'restore' padding left in the PCICC/PCIXCC card. */
498 if (copy_to_user(outputdata, static_pad, pad_len - 1))
499 return -EFAULT;
500 if (put_user(0, outputdata + pad_len - 1))
501 return -EFAULT;
502 }
503 /* Copy the crypto response to user space. */
504 if (copy_to_user(outputdata + pad_len, data, reply_len))
505 return -EFAULT;
506 return 0;
507}
508
509/**
510 * Copy results from a type 86 XCRB reply message back to user space.
511 *
512 * @zdev: crypto device pointer
513 * @reply: reply AP message.
514 * @xcRB: pointer to XCRB
515 *
516 * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
517 */
518static int convert_type86_xcrb(struct zcrypt_device *zdev,
519 struct ap_message *reply,
520 struct ica_xcRB *xcRB)
521{
522 struct type86_fmt2_msg *msg = reply->message;
523 char *data = reply->message;
524
525 /* Copy CPRB to user */
526 if (copy_to_user(xcRB->reply_control_blk_addr,
527 data + msg->fmt2.offset1, msg->fmt2.count1))
528 return -EFAULT;
529 xcRB->reply_control_blk_length = msg->fmt2.count1;
530
531 /* Copy data buffer to user */
532 if (msg->fmt2.count2)
533 if (copy_to_user(xcRB->reply_data_addr,
534 data + msg->fmt2.offset2, msg->fmt2.count2))
535 return -EFAULT;
536 xcRB->reply_data_length = msg->fmt2.count2;
537 return 0;
538}
539
540static int convert_type86_rng(struct zcrypt_device *zdev,
541 struct ap_message *reply,
542 char *buffer)
543{
544 struct {
545 struct type86_hdr hdr;
546 struct type86_fmt2_ext fmt2;
547 struct CPRBX cprbx;
548 } __attribute__((packed)) *msg = reply->message;
549 char *data = reply->message;
550
551 if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
552 return -EINVAL;
553 memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
554 return msg->fmt2.count2;
555}
556
557static int convert_response_ica(struct zcrypt_device *zdev,
558 struct ap_message *reply,
559 char __user *outputdata,
560 unsigned int outputdatalength)
561{
562 struct type86x_reply *msg = reply->message;
563
564 /* Response type byte is the second byte in the response. */
565 switch (((unsigned char *) reply->message)[1]) {
566 case TYPE82_RSP_CODE:
567 case TYPE88_RSP_CODE:
568 return convert_error(zdev, reply);
569 case TYPE86_RSP_CODE:
570 if (msg->cprbx.ccp_rtcode &&
571 (msg->cprbx.ccp_rscode == 0x14f) &&
572 (outputdatalength > 256)) {
573 if (zdev->max_exp_bit_length <= 17) {
574 zdev->max_exp_bit_length = 17;
575 return -EAGAIN;
576 } else
577 return -EINVAL;
578 }
579 if (msg->hdr.reply_code)
580 return convert_error(zdev, reply);
581 if (msg->cprbx.cprb_ver_id == 0x02)
582 return convert_type86_ica(zdev, reply,
583 outputdata, outputdatalength);
584		/* Fall through, no break; an incorrect CPRB version is an
585		 * unknown response. */
586 default: /* Unknown response type, this should NEVER EVER happen */
587 zdev->online = 0;
588 return -EAGAIN; /* repeat the request on a different device. */
589 }
590}
591
592static int convert_response_xcrb(struct zcrypt_device *zdev,
593 struct ap_message *reply,
594 struct ica_xcRB *xcRB)
595{
596 struct type86x_reply *msg = reply->message;
597
598 /* Response type byte is the second byte in the response. */
599 switch (((unsigned char *) reply->message)[1]) {
600 case TYPE82_RSP_CODE:
601 case TYPE88_RSP_CODE:
602 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
603 return convert_error(zdev, reply);
604 case TYPE86_RSP_CODE:
605 if (msg->hdr.reply_code) {
606 memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
607 return convert_error(zdev, reply);
608 }
609 if (msg->cprbx.cprb_ver_id == 0x02)
610 return convert_type86_xcrb(zdev, reply, xcRB);
611		/* Fall through, no break; an incorrect CPRB version is an
612		 * unknown response. */
613 default: /* Unknown response type, this should NEVER EVER happen */
614 xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
615 zdev->online = 0;
616 return -EAGAIN; /* repeat the request on a different device. */
617 }
618}
619
620static int convert_response_rng(struct zcrypt_device *zdev,
621 struct ap_message *reply,
622 char *data)
623{
624 struct type86x_reply *msg = reply->message;
625
626 switch (msg->hdr.type) {
627 case TYPE82_RSP_CODE:
628 case TYPE88_RSP_CODE:
629 return -EINVAL;
630 case TYPE86_RSP_CODE:
631 if (msg->hdr.reply_code)
632 return -EINVAL;
633 if (msg->cprbx.cprb_ver_id == 0x02)
634 return convert_type86_rng(zdev, reply, data);
635		/* Fall through, no break; an incorrect CPRB version is an
636		 * unknown response. */
637 default: /* Unknown response type, this should NEVER EVER happen */
638 zdev->online = 0;
639 return -EAGAIN; /* repeat the request on a different device. */
640 }
641}
642
643/**
644 * This function is called from the AP bus code after a crypto request
645 * "msg" has finished with the reply message "reply".
646 * It is called from tasklet context.
647 * @ap_dev: pointer to the AP device
648 * @msg: pointer to the AP message
649 * @reply: pointer to the AP reply message
650 */
651static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
652 struct ap_message *msg,
653 struct ap_message *reply)
654{
655 static struct error_hdr error_reply = {
656 .type = TYPE82_RSP_CODE,
657 .reply_code = REP82_ERROR_MACHINE_FAILURE,
658 };
659 struct response_type *resp_type =
660 (struct response_type *) msg->private;
661 struct type86x_reply *t86r;
662 int length;
663
664 /* Copy the reply message to the request message buffer. */
665 if (IS_ERR(reply)) {
666 memcpy(msg->message, &error_reply, sizeof(error_reply));
667 goto out;
668 }
669 t86r = reply->message;
670 if (t86r->hdr.type == TYPE86_RSP_CODE &&
671 t86r->cprbx.cprb_ver_id == 0x02) {
672 switch (resp_type->type) {
673 case PCIXCC_RESPONSE_TYPE_ICA:
674 length = sizeof(struct type86x_reply)
675 + t86r->length - 2;
676 length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length);
677 memcpy(msg->message, reply->message, length);
678 break;
679 case PCIXCC_RESPONSE_TYPE_XCRB:
680 length = t86r->fmt2.offset2 + t86r->fmt2.count2;
681 length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
682 memcpy(msg->message, reply->message, length);
683 break;
684 default:
685 memcpy(msg->message, &error_reply, sizeof error_reply);
686 }
687 } else
688 memcpy(msg->message, reply->message, sizeof error_reply);
689out:
690 complete(&(resp_type->work));
691}
692
693static atomic_t zcrypt_step = ATOMIC_INIT(0);
694
695/**
696 * The request distributor calls this function if it picked the PCIXCC/CEX2C
697 * device to handle a modexpo request.
698 * @zdev: pointer to zcrypt_device structure that identifies the
699 * PCIXCC/CEX2C device to the request distributor
700 * @mex: pointer to the modexpo request buffer
701 */
702static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
703 struct ica_rsa_modexpo *mex)
704{
705 struct ap_message ap_msg;
706 struct response_type resp_type = {
707 .type = PCIXCC_RESPONSE_TYPE_ICA,
708 };
709 int rc;
710
711 ap_init_message(&ap_msg);
712 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
713 if (!ap_msg.message)
714 return -ENOMEM;
715 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
716 atomic_inc_return(&zcrypt_step);
717 ap_msg.private = &resp_type;
718 rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
719 if (rc)
720 goto out_free;
721 init_completion(&resp_type.work);
722 ap_queue_message(zdev->ap_dev, &ap_msg);
723 rc = wait_for_completion_interruptible(&resp_type.work);
724 if (rc == 0)
725 rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
726 mex->outputdatalength);
727 else
728 /* Signal pending. */
729 ap_cancel_message(zdev->ap_dev, &ap_msg);
730out_free:
731 free_page((unsigned long) ap_msg.message);
732 return rc;
733}
734
735/**
736 * The request distributor calls this function if it picked the PCIXCC/CEX2C
737 * device to handle a modexpo_crt request.
738 * @zdev: pointer to zcrypt_device structure that identifies the
739 * PCIXCC/CEX2C device to the request distributor
740 * @crt: pointer to the modexpo_crt request buffer
741 */
742static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
743 struct ica_rsa_modexpo_crt *crt)
744{
745 struct ap_message ap_msg;
746 struct response_type resp_type = {
747 .type = PCIXCC_RESPONSE_TYPE_ICA,
748 };
749 int rc;
750
751 ap_init_message(&ap_msg);
752 ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
753 if (!ap_msg.message)
754 return -ENOMEM;
755 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
756 atomic_inc_return(&zcrypt_step);
757 ap_msg.private = &resp_type;
758 rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
759 if (rc)
760 goto out_free;
761 init_completion(&resp_type.work);
762 ap_queue_message(zdev->ap_dev, &ap_msg);
763 rc = wait_for_completion_interruptible(&resp_type.work);
764 if (rc == 0)
765 rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
766 crt->outputdatalength);
767 else
768 /* Signal pending. */
769 ap_cancel_message(zdev->ap_dev, &ap_msg);
770out_free:
771 free_page((unsigned long) ap_msg.message);
772 return rc;
773}
774
775/**
776 * The request distributor calls this function if it picked the PCIXCC/CEX2C
777 * device to handle a send_cprb request.
778 * @zdev: pointer to zcrypt_device structure that identifies the
779 * PCIXCC/CEX2C device to the request distributor
780 * @xcRB: pointer to the send_cprb request buffer
781 */
782static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
783 struct ica_xcRB *xcRB)
784{
785 struct ap_message ap_msg;
786 struct response_type resp_type = {
787 .type = PCIXCC_RESPONSE_TYPE_XCRB,
788 };
789 int rc;
790
791 ap_init_message(&ap_msg);
792 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
793 if (!ap_msg.message)
794 return -ENOMEM;
795 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
796 atomic_inc_return(&zcrypt_step);
797 ap_msg.private = &resp_type;
798 rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB);
799 if (rc)
800 goto out_free;
801 init_completion(&resp_type.work);
802 ap_queue_message(zdev->ap_dev, &ap_msg);
803 rc = wait_for_completion_interruptible(&resp_type.work);
804 if (rc == 0)
805 rc = convert_response_xcrb(zdev, &ap_msg, xcRB);
806 else
807 /* Signal pending. */
808 ap_cancel_message(zdev->ap_dev, &ap_msg);
809out_free:
810 kzfree(ap_msg.message);
811 return rc;
812}
813
814/**
815 * The request distributor calls this function if it picked the PCIXCC/CEX2C
816 * device to generate random data.
817 * @zdev: pointer to zcrypt_device structure that identifies the
818 * PCIXCC/CEX2C device to the request distributor
819 * @buffer: pointer to a memory page to return random data
820 */
821
822static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
823 char *buffer)
824{
825 struct ap_message ap_msg;
826 struct response_type resp_type = {
827 .type = PCIXCC_RESPONSE_TYPE_XCRB,
828 };
829 int rc;
830
831 ap_init_message(&ap_msg);
832 ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
833 if (!ap_msg.message)
834 return -ENOMEM;
835 ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
836 atomic_inc_return(&zcrypt_step);
837 ap_msg.private = &resp_type;
838 rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
839 init_completion(&resp_type.work);
840 ap_queue_message(zdev->ap_dev, &ap_msg);
841 rc = wait_for_completion_interruptible(&resp_type.work);
842 if (rc == 0)
843 rc = convert_response_rng(zdev, &ap_msg, buffer);
844 else
845 /* Signal pending. */
846 ap_cancel_message(zdev->ap_dev, &ap_msg);
847 kfree(ap_msg.message);
848 return rc;
849}
850
851/**
852 * The crypto operations for a PCIXCC/CEX2C card.
853 */
854static struct zcrypt_ops zcrypt_pcixcc_ops = {
855 .rsa_modexpo = zcrypt_pcixcc_modexpo,
856 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
857 .send_cprb = zcrypt_pcixcc_send_cprb,
858};
859
860static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
861 .rsa_modexpo = zcrypt_pcixcc_modexpo,
862 .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
863 .send_cprb = zcrypt_pcixcc_send_cprb,
864 .rng = zcrypt_pcixcc_rng,
865};
866
867/**
94 * Micro-code detection function. It sends a message to a pcixcc card 868 * Micro-code detection function. It sends a message to a pcixcc card
95 * to find out the microcode level. 869 * to find out the microcode level.
96 * @ap_dev: pointer to the AP device. 870 * @ap_dev: pointer to the AP device.
@@ -269,7 +1043,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
269 struct zcrypt_device *zdev; 1043 struct zcrypt_device *zdev;
270 int rc = 0; 1044 int rc = 0;
271 1045
272 zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE); 1046 zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
273 if (!zdev) 1047 if (!zdev)
274 return -ENOMEM; 1048 return -ENOMEM;
275 zdev->ap_dev = ap_dev; 1049 zdev->ap_dev = ap_dev;
@@ -322,11 +1096,9 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
322 return rc; 1096 return rc;
323 } 1097 }
324 if (rc) 1098 if (rc)
325 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 1099 zdev->ops = &zcrypt_pcixcc_with_rng_ops;
326 MSGTYPE06_VARIANT_DEFAULT);
327 else 1100 else
328 zdev->ops = zcrypt_msgtype_request(MSGTYPE06_NAME, 1101 zdev->ops = &zcrypt_pcixcc_ops;
329 MSGTYPE06_VARIANT_NORNG);
330 ap_dev->reply = &zdev->reply; 1102 ap_dev->reply = &zdev->reply;
331 ap_dev->private = zdev; 1103 ap_dev->private = zdev;
332 rc = zcrypt_device_register(zdev); 1104 rc = zcrypt_device_register(zdev);
@@ -336,7 +1108,6 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
336 1108
337 out_free: 1109 out_free:
338 ap_dev->private = NULL; 1110 ap_dev->private = NULL;
339 zcrypt_msgtype_release(zdev->ops);
340 zcrypt_device_free(zdev); 1111 zcrypt_device_free(zdev);
341 return rc; 1112 return rc;
342} 1113}
@@ -348,10 +1119,8 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
348static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) 1119static void zcrypt_pcixcc_remove(struct ap_device *ap_dev)
349{ 1120{
350 struct zcrypt_device *zdev = ap_dev->private; 1121 struct zcrypt_device *zdev = ap_dev->private;
351 struct zcrypt_ops *zops = zdev->ops;
352 1122
353 zcrypt_device_unregister(zdev); 1123 zcrypt_device_unregister(zdev);
354 zcrypt_msgtype_release(zops);
355} 1124}
356 1125
357int __init zcrypt_pcixcc_init(void) 1126int __init zcrypt_pcixcc_init(void)
@@ -364,5 +1133,7 @@ void zcrypt_pcixcc_exit(void)
364 ap_driver_unregister(&zcrypt_pcixcc_driver); 1133 ap_driver_unregister(&zcrypt_pcixcc_driver);
365} 1134}
366 1135
1136#ifndef CONFIG_ZCRYPT_MONOLITHIC
367module_init(zcrypt_pcixcc_init); 1137module_init(zcrypt_pcixcc_init);
368module_exit(zcrypt_pcixcc_exit); 1138module_exit(zcrypt_pcixcc_exit);
1139#endif
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index eacafc8962f..8cb7d7a6973 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,13 +1,14 @@
1/* 1/*
2 * linux/drivers/s390/crypto/zcrypt_pcixcc.h
3 *
2 * zcrypt 2.1.0 4 * zcrypt 2.1.0
3 * 5 *
4 * Copyright IBM Corp. 2001, 2012 6 * Copyright (C) 2001, 2006 IBM Corporation
5 * Author(s): Robert Burroughs 7 * Author(s): Robert Burroughs
6 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
7 * 9 *
8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
10 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 14 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 8491111aec1..aec60d55b10 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * virtio for kvm on s390 2 * kvm_virtio.c - virtio for kvm on s390
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008
5 * 5 *
@@ -20,12 +20,10 @@
20#include <linux/virtio_console.h> 20#include <linux/virtio_console.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/virtio_ring.h> 22#include <linux/virtio_ring.h>
23#include <linux/export.h>
24#include <linux/pfn.h> 23#include <linux/pfn.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/kvm_para.h> 25#include <asm/kvm_para.h>
27#include <asm/kvm_virtio.h> 26#include <asm/kvm_virtio.h>
28#include <asm/sclp.h>
29#include <asm/setup.h> 27#include <asm/setup.h>
30#include <asm/irq.h> 28#include <asm/irq.h>
31 29
@@ -35,7 +33,7 @@
35 * The pointer to our (page) of device descriptions. 33 * The pointer to our (page) of device descriptions.
36 */ 34 */
37static void *kvm_devices; 35static void *kvm_devices;
38static struct work_struct hotplug_work; 36struct work_struct hotplug_work;
39 37
40struct kvm_device { 38struct kvm_device {
41 struct virtio_device vdev; 39 struct virtio_device vdev;
@@ -190,9 +188,6 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
190 if (index >= kdev->desc->num_vq) 188 if (index >= kdev->desc->num_vq)
191 return ERR_PTR(-ENOENT); 189 return ERR_PTR(-ENOENT);
192 190
193 if (!name)
194 return NULL;
195
196 config = kvm_vq_config(kdev->desc)+index; 191 config = kvm_vq_config(kdev->desc)+index;
197 192
198 err = vmem_add_mapping(config->address, 193 err = vmem_add_mapping(config->address,
@@ -201,8 +196,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
201 if (err) 196 if (err)
202 goto out; 197 goto out;
203 198
204 vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, 199 vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
205 vdev, true, (void *) config->address, 200 vdev, (void *) config->address,
206 kvm_notify, callback, name); 201 kvm_notify, callback, name);
207 if (!vq) { 202 if (!vq) {
208 err = -ENOMEM; 203 err = -ENOMEM;
@@ -267,11 +262,6 @@ error:
267 return PTR_ERR(vqs[i]); 262 return PTR_ERR(vqs[i]);
268} 263}
269 264
270static const char *kvm_bus_name(struct virtio_device *vdev)
271{
272 return "";
273}
274
275/* 265/*
276 * The config ops structure as defined by virtio config 266 * The config ops structure as defined by virtio config
277 */ 267 */
@@ -285,7 +275,6 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
285 .reset = kvm_reset, 275 .reset = kvm_reset,
286 .find_vqs = kvm_find_vqs, 276 .find_vqs = kvm_find_vqs,
287 .del_vqs = kvm_del_vqs, 277 .del_vqs = kvm_del_vqs,
288 .bus_name = kvm_bus_name,
289}; 278};
290 279
291/* 280/*
@@ -345,10 +334,10 @@ static void scan_devices(void)
345 */ 334 */
346static int match_desc(struct device *dev, void *data) 335static int match_desc(struct device *dev, void *data)
347{ 336{
348 struct virtio_device *vdev = dev_to_virtio(dev); 337 if ((ulong)to_kvmdev(dev_to_virtio(dev))->desc == (ulong)data)
349 struct kvm_device *kdev = to_kvmdev(vdev); 338 return 1;
350 339
351 return kdev->desc == data; 340 return 0;
352} 341}
353 342
354/* 343/*
@@ -384,15 +373,17 @@ static void hotplug_devices(struct work_struct *dummy)
384/* 373/*
385 * we emulate the request_irq behaviour on top of s390 extints 374 * we emulate the request_irq behaviour on top of s390 extints
386 */ 375 */
387static void kvm_extint_handler(struct ext_code ext_code, 376static void kvm_extint_handler(unsigned int ext_int_code,
388 unsigned int param32, unsigned long param64) 377 unsigned int param32, unsigned long param64)
389{ 378{
390 struct virtqueue *vq; 379 struct virtqueue *vq;
380 u16 subcode;
391 u32 param; 381 u32 param;
392 382
393 if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64) 383 subcode = ext_int_code >> 16;
384 if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
394 return; 385 return;
395 inc_irq_stat(IRQEXT_VRT); 386 kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
396 387
397 /* The LSB might be overloaded, we have to mask it */ 388 /* The LSB might be overloaded, we have to mask it */
398 vq = (struct virtqueue *)(param64 & ~1UL); 389 vq = (struct virtqueue *)(param64 & ~1UL);
@@ -472,7 +463,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
472 463
473static int __init s390_virtio_console_init(void) 464static int __init s390_virtio_console_init(void)
474{ 465{
475 if (sclp_has_vt220() || sclp_has_linemode()) 466 if (!MACHINE_IS_KVM)
476 return -ENODEV; 467 return -ENODEV;
477 return virtio_cons_early_init(early_put_chars); 468 return virtio_cons_early_init(early_put_chars);
478} 469}
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index dfda748c400..fa80ba1f034 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -4,10 +4,11 @@ menu "S/390 network device drivers"
4config LCS 4config LCS
5 def_tristate m 5 def_tristate m
6 prompt "Lan Channel Station Interface" 6 prompt "Lan Channel Station Interface"
7 depends on CCW && NETDEVICES && (ETHERNET || FDDI) 7 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
8 help 8 help
9 Select this option if you want to use LCS networking on IBM System z. 9 Select this option if you want to use LCS networking on IBM System z.
10 This device driver supports FDDI (IEEE 802.7) and Ethernet. 10 This device driver supports Token Ring (IEEE 802.5),
11 FDDI (IEEE 802.7) and Ethernet.
11 To compile as a module, choose M. The module name is lcs. 12 To compile as a module, choose M. The module name is lcs.
12 If you do not know what it is, it's safe to choose Y. 13 If you do not know what it is, it's safe to choose Y.
13 14
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 83bc9c5fa0c..f1fa2483ae6 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * drivers/s390/net/claw.c
2 * ESCON CLAW network driver 3 * ESCON CLAW network driver
3 * 4 *
4 * Linux for zSeries version 5 * Linux for zSeries version
@@ -62,6 +63,7 @@
62 63
63#define KMSG_COMPONENT "claw" 64#define KMSG_COMPONENT "claw"
64 65
66#include <linux/kernel_stat.h>
65#include <asm/ccwdev.h> 67#include <asm/ccwdev.h>
66#include <asm/ccwgroup.h> 68#include <asm/ccwgroup.h>
67#include <asm/debug.h> 69#include <asm/debug.h>
@@ -135,6 +137,7 @@ static inline void
135claw_set_busy(struct net_device *dev) 137claw_set_busy(struct net_device *dev)
136{ 138{
137 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; 139 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
140 eieio();
138} 141}
139 142
140static inline void 143static inline void
@@ -142,11 +145,13 @@ claw_clear_busy(struct net_device *dev)
142{ 145{
143 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); 146 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
144 netif_wake_queue(dev); 147 netif_wake_queue(dev);
148 eieio();
145} 149}
146 150
147static inline int 151static inline int
148claw_check_busy(struct net_device *dev) 152claw_check_busy(struct net_device *dev)
149{ 153{
154 eieio();
150 return ((struct claw_privbk *) dev->ml_priv)->tbusy; 155 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
151} 156}
152 157
@@ -229,6 +234,8 @@ static ssize_t claw_rbuff_show(struct device *dev,
229static ssize_t claw_rbuff_write(struct device *dev, 234static ssize_t claw_rbuff_write(struct device *dev,
230 struct device_attribute *attr, 235 struct device_attribute *attr,
231 const char *buf, size_t count); 236 const char *buf, size_t count);
237static int claw_add_files(struct device *dev);
238static void claw_remove_files(struct device *dev);
232 239
233/* Functions for System Validate */ 240/* Functions for System Validate */
234static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw); 241static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
@@ -261,10 +268,12 @@ static struct ccwgroup_driver claw_group_driver = {
261 .owner = THIS_MODULE, 268 .owner = THIS_MODULE,
262 .name = "claw", 269 .name = "claw",
263 }, 270 },
264 .setup = claw_probe, 271 .max_slaves = 2,
265 .remove = claw_remove_device, 272 .driver_id = 0xC3D3C1E6,
266 .set_online = claw_new_device, 273 .probe = claw_probe,
267 .set_offline = claw_shutdown_device, 274 .remove = claw_remove_device,
275 .set_online = claw_new_device,
276 .set_offline = claw_shutdown_device,
268 .prepare = claw_pm_prepare, 277 .prepare = claw_pm_prepare,
269}; 278};
270 279
@@ -282,27 +291,32 @@ static struct ccw_driver claw_ccw_driver = {
282 .ids = claw_ids, 291 .ids = claw_ids,
283 .probe = ccwgroup_probe_ccwdev, 292 .probe = ccwgroup_probe_ccwdev,
284 .remove = ccwgroup_remove_ccwdev, 293 .remove = ccwgroup_remove_ccwdev,
285 .int_class = IRQIO_CLW,
286}; 294};
287 295
288static ssize_t claw_driver_group_store(struct device_driver *ddrv, 296static ssize_t
289 const char *buf, size_t count) 297claw_driver_group_store(struct device_driver *ddrv, const char *buf,
298 size_t count)
290{ 299{
291 int err; 300 int err;
292 err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf); 301 err = ccwgroup_create_from_string(claw_root_dev,
302 claw_group_driver.driver_id,
303 &claw_ccw_driver, 2, buf);
293 return err ? err : count; 304 return err ? err : count;
294} 305}
306
295static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store); 307static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
296 308
297static struct attribute *claw_drv_attrs[] = { 309static struct attribute *claw_group_attrs[] = {
298 &driver_attr_group.attr, 310 &driver_attr_group.attr,
299 NULL, 311 NULL,
300}; 312};
301static struct attribute_group claw_drv_attr_group = { 313
302 .attrs = claw_drv_attrs, 314static struct attribute_group claw_group_attr_group = {
315 .attrs = claw_group_attrs,
303}; 316};
304static const struct attribute_group *claw_drv_attr_groups[] = { 317
305 &claw_drv_attr_group, 318static const struct attribute_group *claw_group_attr_groups[] = {
319 &claw_group_attr_group,
306 NULL, 320 NULL,
307}; 321};
308 322
@@ -310,6 +324,60 @@ static const struct attribute_group *claw_drv_attr_groups[] = {
310* Key functions 324* Key functions
311*/ 325*/
312 326
327/*----------------------------------------------------------------*
328 * claw_probe *
329 * this function is called for each CLAW device. *
330 *----------------------------------------------------------------*/
331static int
332claw_probe(struct ccwgroup_device *cgdev)
333{
334 int rc;
335 struct claw_privbk *privptr=NULL;
336
337 CLAW_DBF_TEXT(2, setup, "probe");
338 if (!get_device(&cgdev->dev))
339 return -ENODEV;
340 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
341 dev_set_drvdata(&cgdev->dev, privptr);
342 if (privptr == NULL) {
343 probe_error(cgdev);
344 put_device(&cgdev->dev);
345 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
346 return -ENOMEM;
347 }
348 privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
349 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
350 if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
351 probe_error(cgdev);
352 put_device(&cgdev->dev);
353 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
354 return -ENOMEM;
355 }
356 memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
357 memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
358 memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
359 privptr->p_env->packing = 0;
360 privptr->p_env->write_buffers = 5;
361 privptr->p_env->read_buffers = 5;
362 privptr->p_env->read_size = CLAW_FRAME_SIZE;
363 privptr->p_env->write_size = CLAW_FRAME_SIZE;
364 rc = claw_add_files(&cgdev->dev);
365 if (rc) {
366 probe_error(cgdev);
367 put_device(&cgdev->dev);
368 dev_err(&cgdev->dev, "Creating the /proc files for a new"
369 " CLAW device failed\n");
370 CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
371 return rc;
372 }
373 privptr->p_env->p_priv = privptr;
374 cgdev->cdev[0]->handler = claw_irq_handler;
375 cgdev->cdev[1]->handler = claw_irq_handler;
376 CLAW_DBF_TEXT(2, setup, "prbext 0");
377
378 return 0;
379} /* end of claw_probe */
380
313/*-------------------------------------------------------------------* 381/*-------------------------------------------------------------------*
314 * claw_tx * 382 * claw_tx *
315 *-------------------------------------------------------------------*/ 383 *-------------------------------------------------------------------*/
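claw_probe above follows the usual probe-time discipline: take a reference on the device, allocate private state piece by piece, and unwind everything already acquired on any failure (claw delegates the unwind to its probe_error() helper plus put_device()). A minimal userspace sketch of the same unwind pattern using goto labels instead of a helper — purely illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Probe-style setup: each failure unwinds what came before it. */
    struct priv {
            char *envelope;
            char *env;
    };

    static struct priv *probe(void)
    {
            struct priv *p = calloc(1, sizeof(*p));
            if (!p)
                    return NULL;

            p->envelope = calloc(1, 64);
            if (!p->envelope)
                    goto err_priv;

            p->env = calloc(1, 32);
            if (!p->env)
                    goto err_envelope;

            return p;

    err_envelope:
            free(p->envelope);
    err_priv:
            free(p);
            return NULL;
    }

    int main(void)
    {
            struct priv *p = probe();
            if (!p)
                    return 1;
            puts("probe ok");
            free(p->env);
            free(p->envelope);
            free(p);
            return 0;
    }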
@@ -577,6 +645,7 @@ claw_irq_handler(struct ccw_device *cdev,
577 struct claw_env *p_env; 645 struct claw_env *p_env;
578 struct chbk *p_ch_r=NULL; 646 struct chbk *p_ch_r=NULL;
579 647
648 kstat_cpu(smp_processor_id()).irqs[IOINT_CLW]++;
580 CLAW_DBF_TEXT(4, trace, "clawirq"); 649 CLAW_DBF_TEXT(4, trace, "clawirq");
581 /* Bypass all 'unsolicited interrupts' */ 650 /* Bypass all 'unsolicited interrupts' */
582 privptr = dev_get_drvdata(&cdev->dev); 651 privptr = dev_get_drvdata(&cdev->dev);
@@ -3018,11 +3087,14 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3018{ 3087{
3019 struct claw_privbk *priv; 3088 struct claw_privbk *priv;
3020 3089
3090 BUG_ON(!cgdev);
3021 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3091 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3022 priv = dev_get_drvdata(&cgdev->dev); 3092 priv = dev_get_drvdata(&cgdev->dev);
3093 BUG_ON(!priv);
3023 dev_info(&cgdev->dev, " will be removed.\n"); 3094 dev_info(&cgdev->dev, " will be removed.\n");
3024 if (cgdev->state == CCWGROUP_ONLINE) 3095 if (cgdev->state == CCWGROUP_ONLINE)
3025 claw_shutdown_device(cgdev); 3096 claw_shutdown_device(cgdev);
3097 claw_remove_files(&cgdev->dev);
3026 kfree(priv->p_mtc_envelope); 3098 kfree(priv->p_mtc_envelope);
3027 priv->p_mtc_envelope=NULL; 3099 priv->p_mtc_envelope=NULL;
3028 kfree(priv->p_env); 3100 kfree(priv->p_env);
@@ -3250,6 +3322,7 @@ claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3250 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers); 3322 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3251 return count; 3323 return count;
3252} 3324}
3325
3253static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); 3326static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
3254 3327
3255static struct attribute *claw_attr[] = { 3328static struct attribute *claw_attr[] = {
@@ -3260,73 +3333,40 @@ static struct attribute *claw_attr[] = {
3260 &dev_attr_host_name.attr, 3333 &dev_attr_host_name.attr,
3261 NULL, 3334 NULL,
3262}; 3335};
3336
3263static struct attribute_group claw_attr_group = { 3337static struct attribute_group claw_attr_group = {
3264 .attrs = claw_attr, 3338 .attrs = claw_attr,
3265}; 3339};
3266static const struct attribute_group *claw_attr_groups[] = {
3267 &claw_attr_group,
3268 NULL,
3269};
3270static const struct device_type claw_devtype = {
3271 .name = "claw",
3272 .groups = claw_attr_groups,
3273};
3274 3340
3275/*----------------------------------------------------------------* 3341static int
3276 * claw_probe * 3342claw_add_files(struct device *dev)
3277 * this function is called for each CLAW device. *
3278 *----------------------------------------------------------------*/
3279static int claw_probe(struct ccwgroup_device *cgdev)
3280{ 3343{
3281 struct claw_privbk *privptr = NULL; 3344 CLAW_DBF_TEXT(2, setup, "add_file");
3282 3345 return sysfs_create_group(&dev->kobj, &claw_attr_group);
3283 CLAW_DBF_TEXT(2, setup, "probe"); 3346}
3284 if (!get_device(&cgdev->dev))
3285 return -ENODEV;
3286 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3287 dev_set_drvdata(&cgdev->dev, privptr);
3288 if (privptr == NULL) {
3289 probe_error(cgdev);
3290 put_device(&cgdev->dev);
3291 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3292 return -ENOMEM;
3293 }
3294 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3295 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3296 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3297 probe_error(cgdev);
3298 put_device(&cgdev->dev);
3299 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3300 return -ENOMEM;
3301 }
3302 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3303 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3304 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3305 privptr->p_env->packing = 0;
3306 privptr->p_env->write_buffers = 5;
3307 privptr->p_env->read_buffers = 5;
3308 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3309 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3310 privptr->p_env->p_priv = privptr;
3311 cgdev->cdev[0]->handler = claw_irq_handler;
3312 cgdev->cdev[1]->handler = claw_irq_handler;
3313 cgdev->dev.type = &claw_devtype;
3314 CLAW_DBF_TEXT(2, setup, "prbext 0");
3315 3347
3316 return 0; 3348static void
3317} /* end of claw_probe */ 3349claw_remove_files(struct device *dev)
3350{
3351 CLAW_DBF_TEXT(2, setup, "rem_file");
3352 sysfs_remove_group(&dev->kobj, &claw_attr_group);
3353}
3318 3354
3319/*--------------------------------------------------------------------* 3355/*--------------------------------------------------------------------*
3320* claw_init and cleanup * 3356* claw_init and cleanup *
3321*---------------------------------------------------------------------*/ 3357*---------------------------------------------------------------------*/
3322 3358
3323static void __exit claw_cleanup(void) 3359static void __exit
3360claw_cleanup(void)
3324{ 3361{
3362 driver_remove_file(&claw_group_driver.driver,
3363 &driver_attr_group);
3325 ccwgroup_driver_unregister(&claw_group_driver); 3364 ccwgroup_driver_unregister(&claw_group_driver);
3326 ccw_driver_unregister(&claw_ccw_driver); 3365 ccw_driver_unregister(&claw_ccw_driver);
3327 root_device_unregister(claw_root_dev); 3366 root_device_unregister(claw_root_dev);
3328 claw_unregister_debug_facility(); 3367 claw_unregister_debug_facility();
3329 pr_info("Driver unloaded\n"); 3368 pr_info("Driver unloaded\n");
3369
3330} 3370}
3331 3371
3332/** 3372/**
@@ -3335,7 +3375,8 @@ static void __exit claw_cleanup(void)
3335 * 3375 *
3336 * @return 0 on success, !0 on error. 3376 * @return 0 on success, !0 on error.
3337 */ 3377 */
3338static int __init claw_init(void) 3378static int __init
3379claw_init(void)
3339{ 3380{
3340 int ret = 0; 3381 int ret = 0;
3341 3382
@@ -3354,7 +3395,7 @@ static int __init claw_init(void)
3354 ret = ccw_driver_register(&claw_ccw_driver); 3395 ret = ccw_driver_register(&claw_ccw_driver);
3355 if (ret) 3396 if (ret)
3356 goto ccw_err; 3397 goto ccw_err;
3357 claw_group_driver.driver.groups = claw_drv_attr_groups; 3398 claw_group_driver.driver.groups = claw_group_attr_groups;
3358 ret = ccwgroup_driver_register(&claw_group_driver); 3399 ret = ccwgroup_driver_register(&claw_group_driver);
3359 if (ret) 3400 if (ret)
3360 goto ccwgroup_err; 3401 goto ccwgroup_err;
@@ -3377,5 +3418,5 @@ module_exit(claw_cleanup);
3377 3418
3378MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>"); 3419MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3379MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \ 3420MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3380 "Copyright IBM Corp. 2000, 2008\n"); 3421 "Copyright 2000,2008 IBM Corporation\n");
3381MODULE_LICENSE("GPL"); 3422MODULE_LICENSE("GPL");
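The driver-level "group" attribute seen in claw_driver_group_store() receives a string naming the channel devices to be grouped and hands it to the ccwgroup layer (ccwgroup_create_dev() or ccwgroup_create_from_string(), depending on the side of the diff). A minimal userspace sketch of the string handling behind it, assuming a "busid,busid" input format like "0.0.0600,0.0.0601" — the format and helper name are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* Split a comma-separated list into fixed-size bus-id slots. */
    static int parse_group(const char *buf, char ids[][16], int nids)
    {
            char tmp[64];
            char *p, *save = NULL;
            int n = 0;

            strncpy(tmp, buf, sizeof(tmp) - 1);
            tmp[sizeof(tmp) - 1] = '\0';

            for (p = strtok_r(tmp, ",\n", &save); p && n < nids;
                 p = strtok_r(NULL, ",\n", &save))
                    snprintf(ids[n++], 16, "%s", p);

            return n == nids ? 0 : -1;   /* expect one id per slave */
    }

    int main(void)
    {
            char ids[2][16];

            if (parse_group("0.0.0600,0.0.0601\n", ids, 2) == 0)
                    printf("read: %s  write: %s\n", ids[0], ids[1]);
            return 0;
    }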
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index 6514e1cb3f1..d962fd741a2 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_dbug.c
3 *
2 * Copyright IBM Corp. 2001, 2007 4 * Copyright IBM Corp. 2001, 2007
3 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) 5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
4 * 6 *
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 47bf0501995..26966d0b9ab 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_dbug.h
3 *
2 * Copyright IBM Corp. 2001, 2007 4 * Copyright IBM Corp. 2001, 2007
3 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) 5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
4 * 6 *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index fb92524d24e..2d602207541 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_fsms.c
3 *
2 * Copyright IBM Corp. 2001, 2007 4 * Copyright IBM Corp. 2001, 2007
3 * Authors: Fritz Elfert (felfert@millenux.com) 5 * Authors: Fritz Elfert (felfert@millenux.com)
4 * Peter Tiedemann (ptiedem@de.ibm.com) 6 * Peter Tiedemann (ptiedem@de.ibm.com)
@@ -1339,12 +1341,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1339 1341
1340 spin_unlock(&ch->collect_lock); 1342 spin_unlock(&ch->collect_lock);
1341 clear_normalized_cda(&ch->ccw[1]); 1343 clear_normalized_cda(&ch->ccw[1]);
1342
1343 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1344 (void *)(unsigned long)ch->ccw[1].cda,
1345 ch->trans_skb->data);
1346 ch->ccw[1].count = ch->max_bufsize;
1347
1348 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { 1344 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1349 dev_kfree_skb_any(ch->trans_skb); 1345 dev_kfree_skb_any(ch->trans_skb);
1350 ch->trans_skb = NULL; 1346 ch->trans_skb = NULL;
@@ -1354,11 +1350,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1354 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); 1350 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1355 return; 1351 return;
1356 } 1352 }
1357
1358 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1359 (void *)(unsigned long)ch->ccw[1].cda,
1360 ch->trans_skb->data);
1361
1362 ch->ccw[1].count = ch->trans_skb->len; 1353 ch->ccw[1].count = ch->trans_skb->len;
1363 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 1354 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1364 ch->prof.send_stamp = current_kernel_time(); /* xtime */ 1355 ch->prof.send_stamp = current_kernel_time(); /* xtime */
@@ -1523,7 +1514,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1523 goto done; 1514 goto done;
1524 default: 1515 default:
1525 break; 1516 break;
1526 } 1517 };
1527 1518
1528 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) 1519 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1529 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); 1520 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index c963d04799c..046d077fabb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_fsms.h
3 *
2 * Copyright IBM Corp. 2001, 2007 4 * Copyright IBM Corp. 2001, 2007
3 * Authors: Fritz Elfert (felfert@millenux.com) 5 * Authors: Fritz Elfert (felfert@millenux.com)
4 * Peter Tiedemann (ptiedem@de.ibm.com) 6 * Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 676f12049a3..426787efc49 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_main.c
3 *
2 * Copyright IBM Corp. 2001, 2009 4 * Copyright IBM Corp. 2001, 2009
3 * Author(s): 5 * Author(s):
4 * Original CTC driver(s): 6 * Original CTC driver(s):
@@ -22,6 +24,7 @@
22#define KMSG_COMPONENT "ctcm" 24#define KMSG_COMPONENT "ctcm"
23#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
24 26
27#include <linux/kernel_stat.h>
25#include <linux/module.h> 28#include <linux/module.h>
26#include <linux/init.h> 29#include <linux/init.h>
27#include <linux/kernel.h> 30#include <linux/kernel.h>
@@ -560,9 +563,6 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
560 skb_queue_tail(&ch->io_queue, skb); 563 skb_queue_tail(&ch->io_queue, skb);
561 ccw_idx = 3; 564 ccw_idx = 3;
562 } 565 }
563 if (do_debug_ccw)
564 ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
565 sizeof(struct ccw1) * 3);
566 ch->retry = 0; 566 ch->retry = 0;
567 fsm_newstate(ch->fsm, CTC_STATE_TX); 567 fsm_newstate(ch->fsm, CTC_STATE_TX);
568 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 568 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
@@ -1203,6 +1203,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1203 int cstat; 1203 int cstat;
1204 int dstat; 1204 int dstat;
1205 1205
1206 kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++;
1206 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 1207 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1207 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); 1208 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1208 1209
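This hunk trades between two interrupt-accounting schemes: incrementing a per-CPU kstat counter by hand in each driver's handler (kstat_cpu(smp_processor_id()).irqs[IOINT_CTC]++) versus declaring an .int_class on the ccw_driver and letting the common I/O layer count. A trivial userspace model of the per-CPU, per-class counter table — the enum values are placeholders:

    #include <stdio.h>

    enum irq_class { IOINT_CTC, IOINT_LCS, IOINT_CLW, NR_CLASSES };

    /* One counter row per CPU, one column per interrupt class. */
    static unsigned long irq_stat[4][NR_CLASSES];

    static void handle_irq(int cpu, enum irq_class cls)
    {
            irq_stat[cpu][cls]++;   /* what each handler did by hand */
    }

    int main(void)
    {
            handle_irq(0, IOINT_CTC);
            handle_irq(0, IOINT_CTC);
            printf("cpu0 CTC irqs: %lu\n", irq_stat[0][IOINT_CTC]);
            return 0;
    }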
@@ -1294,11 +1295,6 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
1294 1295
1295} 1296}
1296 1297
1297static const struct device_type ctcm_devtype = {
1298 .name = "ctcm",
1299 .groups = ctcm_attr_groups,
1300};
1301
1302/** 1298/**
1303 * Add ctcm specific attributes. 1299 * Add ctcm specific attributes.
1304 * Add ctcm private data. 1300 * Add ctcm private data.
@@ -1310,6 +1306,7 @@ static const struct device_type ctcm_devtype = {
1310static int ctcm_probe_device(struct ccwgroup_device *cgdev) 1306static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1311{ 1307{
1312 struct ctcm_priv *priv; 1308 struct ctcm_priv *priv;
1309 int rc;
1313 1310
1314 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1311 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1315 "%s %p", 1312 "%s %p",
@@ -1326,11 +1323,17 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1326 put_device(&cgdev->dev); 1323 put_device(&cgdev->dev);
1327 return -ENOMEM; 1324 return -ENOMEM;
1328 } 1325 }
1326
1327 rc = ctcm_add_files(&cgdev->dev);
1328 if (rc) {
1329 kfree(priv);
1330 put_device(&cgdev->dev);
1331 return rc;
1332 }
1329 priv->buffer_size = CTCM_BUFSIZE_DEFAULT; 1333 priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1330 cgdev->cdev[0]->handler = ctcm_irq_handler; 1334 cgdev->cdev[0]->handler = ctcm_irq_handler;
1331 cgdev->cdev[1]->handler = ctcm_irq_handler; 1335 cgdev->cdev[1]->handler = ctcm_irq_handler;
1332 dev_set_drvdata(&cgdev->dev, priv); 1336 dev_set_drvdata(&cgdev->dev, priv);
1333 cgdev->dev.type = &ctcm_devtype;
1334 1337
1335 return 0; 1338 return 0;
1336} 1339}
@@ -1454,7 +1457,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1454 ch_fsm_len, GFP_KERNEL); 1457 ch_fsm_len, GFP_KERNEL);
1455 } 1458 }
1456 if (ch->fsm == NULL) 1459 if (ch->fsm == NULL)
1457 goto nomem_return; 1460 goto free_return;
1458 1461
1459 fsm_newstate(ch->fsm, CTC_STATE_IDLE); 1462 fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1460 1463
@@ -1607,6 +1610,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1607 goto out_dev; 1610 goto out_dev;
1608 } 1611 }
1609 1612
1613 if (ctcm_add_attributes(&cgdev->dev)) {
1614 result = -ENODEV;
1615 goto out_unregister;
1616 }
1617
1610 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); 1618 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
1611 1619
1612 dev_info(&dev->dev, 1620 dev_info(&dev->dev,
@@ -1620,6 +1628,8 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1620 priv->channel[CTCM_WRITE]->id, priv->protocol); 1628 priv->channel[CTCM_WRITE]->id, priv->protocol);
1621 1629
1622 return 0; 1630 return 0;
1631out_unregister:
1632 unregister_netdev(dev);
1623out_dev: 1633out_dev:
1624 ctcm_free_netdevice(dev); 1634 ctcm_free_netdevice(dev);
1625out_ccw2: 1635out_ccw2:
@@ -1658,6 +1668,7 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1658 /* Close the device */ 1668 /* Close the device */
1659 ctcm_close(dev); 1669 ctcm_close(dev);
1660 dev->flags &= ~IFF_RUNNING; 1670 dev->flags &= ~IFF_RUNNING;
1671 ctcm_remove_attributes(&cgdev->dev);
1661 channel_free(priv->channel[CTCM_READ]); 1672 channel_free(priv->channel[CTCM_READ]);
1662 } else 1673 } else
1663 dev = NULL; 1674 dev = NULL;
@@ -1691,12 +1702,15 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1691{ 1702{
1692 struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); 1703 struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
1693 1704
1705 BUG_ON(priv == NULL);
1706
1694 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1707 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1695 "removing device %p, proto : %d", 1708 "removing device %p, proto : %d",
1696 cgdev, priv->protocol); 1709 cgdev, priv->protocol);
1697 1710
1698 if (cgdev->state == CCWGROUP_ONLINE) 1711 if (cgdev->state == CCWGROUP_ONLINE)
1699 ctcm_shutdown_device(cgdev); 1712 ctcm_shutdown_device(cgdev);
1713 ctcm_remove_files(&cgdev->dev);
1700 dev_set_drvdata(&cgdev->dev, NULL); 1714 dev_set_drvdata(&cgdev->dev, NULL);
1701 kfree(priv); 1715 kfree(priv);
1702 put_device(&cgdev->dev); 1716 put_device(&cgdev->dev);
@@ -1755,7 +1769,6 @@ static struct ccw_driver ctcm_ccw_driver = {
1755 .ids = ctcm_ids, 1769 .ids = ctcm_ids,
1756 .probe = ccwgroup_probe_ccwdev, 1770 .probe = ccwgroup_probe_ccwdev,
1757 .remove = ccwgroup_remove_ccwdev, 1771 .remove = ccwgroup_remove_ccwdev,
1758 .int_class = IRQIO_CTC,
1759}; 1772};
1760 1773
1761static struct ccwgroup_driver ctcm_group_driver = { 1774static struct ccwgroup_driver ctcm_group_driver = {
@@ -1763,7 +1776,9 @@ static struct ccwgroup_driver ctcm_group_driver = {
1763 .owner = THIS_MODULE, 1776 .owner = THIS_MODULE,
1764 .name = CTC_DRIVER_NAME, 1777 .name = CTC_DRIVER_NAME,
1765 }, 1778 },
1766 .setup = ctcm_probe_device, 1779 .max_slaves = 2,
1780 .driver_id = 0xC3E3C3D4, /* CTCM */
1781 .probe = ctcm_probe_device,
1767 .remove = ctcm_remove_device, 1782 .remove = ctcm_remove_device,
1768 .set_online = ctcm_new_device, 1783 .set_online = ctcm_new_device,
1769 .set_offline = ctcm_shutdown_device, 1784 .set_offline = ctcm_shutdown_device,
@@ -1772,25 +1787,31 @@ static struct ccwgroup_driver ctcm_group_driver = {
1772 .restore = ctcm_pm_resume, 1787 .restore = ctcm_pm_resume,
1773}; 1788};
1774 1789
1775static ssize_t ctcm_driver_group_store(struct device_driver *ddrv, 1790static ssize_t
1776 const char *buf, size_t count) 1791ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
1792 size_t count)
1777{ 1793{
1778 int err; 1794 int err;
1779 1795
1780 err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf); 1796 err = ccwgroup_create_from_string(ctcm_root_dev,
1797 ctcm_group_driver.driver_id,
1798 &ctcm_ccw_driver, 2, buf);
1781 return err ? err : count; 1799 return err ? err : count;
1782} 1800}
1801
1783static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store); 1802static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1784 1803
1785static struct attribute *ctcm_drv_attrs[] = { 1804static struct attribute *ctcm_group_attrs[] = {
1786 &driver_attr_group.attr, 1805 &driver_attr_group.attr,
1787 NULL, 1806 NULL,
1788}; 1807};
1789static struct attribute_group ctcm_drv_attr_group = { 1808
1790 .attrs = ctcm_drv_attrs, 1809static struct attribute_group ctcm_group_attr_group = {
1810 .attrs = ctcm_group_attrs,
1791}; 1811};
1792static const struct attribute_group *ctcm_drv_attr_groups[] = { 1812
1793 &ctcm_drv_attr_group, 1813static const struct attribute_group *ctcm_group_attr_groups[] = {
1814 &ctcm_group_attr_group,
1794 NULL, 1815 NULL,
1795}; 1816};
1796 1817
@@ -1806,6 +1827,7 @@ static const struct attribute_group *ctcm_drv_attr_groups[] = {
1806 */ 1827 */
1807static void __exit ctcm_exit(void) 1828static void __exit ctcm_exit(void)
1808{ 1829{
1830 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1809 ccwgroup_driver_unregister(&ctcm_group_driver); 1831 ccwgroup_driver_unregister(&ctcm_group_driver);
1810 ccw_driver_unregister(&ctcm_ccw_driver); 1832 ccw_driver_unregister(&ctcm_ccw_driver);
1811 root_device_unregister(ctcm_root_dev); 1833 root_device_unregister(ctcm_root_dev);
@@ -1843,7 +1865,7 @@ static int __init ctcm_init(void)
1843 ret = ccw_driver_register(&ctcm_ccw_driver); 1865 ret = ccw_driver_register(&ctcm_ccw_driver);
1844 if (ret) 1866 if (ret)
1845 goto ccw_err; 1867 goto ccw_err;
1846 ctcm_group_driver.driver.groups = ctcm_drv_attr_groups; 1868 ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
1847 ret = ccwgroup_driver_register(&ctcm_group_driver); 1869 ret = ccwgroup_driver_register(&ctcm_group_driver);
1848 if (ret) 1870 if (ret)
1849 goto ccwgroup_err; 1871 goto ccwgroup_err;
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 477c933685f..24d5215eb0c 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_main.h
3 *
2 * Copyright IBM Corp. 2001, 2007 4 * Copyright IBM Corp. 2001, 2007
3 * Authors: Fritz Elfert (felfert@millenux.com) 5 * Authors: Fritz Elfert (felfert@millenux.com)
4 * Peter Tiedemann (ptiedem@de.ibm.com) 6 * Peter Tiedemann (ptiedem@de.ibm.com)
@@ -223,7 +225,13 @@ struct ctcm_priv {
223int ctcm_open(struct net_device *dev); 225int ctcm_open(struct net_device *dev);
224int ctcm_close(struct net_device *dev); 226int ctcm_close(struct net_device *dev);
225 227
226extern const struct attribute_group *ctcm_attr_groups[]; 228/*
229 * prototypes for non-static sysfs functions
230 */
231int ctcm_add_attributes(struct device *dev);
232void ctcm_remove_attributes(struct device *dev);
233int ctcm_add_files(struct device *dev);
234void ctcm_remove_files(struct device *dev);
227 235
228/* 236/*
229 * Compatibility macros for busy handling 237 * Compatibility macros for busy handling
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 2dbc77b5137..da4c747335e 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_mpc.c
3 *
2 * Copyright IBM Corp. 2004, 2007 4 * Copyright IBM Corp. 2004, 2007
3 * Authors: Belinda Thompson (belindat@us.ibm.com) 5 * Authors: Belinda Thompson (belindat@us.ibm.com)
4 * Andy Richter (richtera@us.ibm.com) 6 * Andy Richter (richtera@us.ibm.com)
@@ -51,8 +53,8 @@
51#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
52#include <asm/idals.h> 54#include <asm/idals.h>
53 55
54#include "ctcm_main.h"
55#include "ctcm_mpc.h" 56#include "ctcm_mpc.h"
57#include "ctcm_main.h"
56#include "ctcm_fsms.h" 58#include "ctcm_fsms.h"
57 59
58static const struct xid2 init_xid = { 60static const struct xid2 init_xid = {
@@ -130,7 +132,7 @@ void ctcmpc_dumpit(char *buf, int len)
130 __u32 ct, sw, rm, dup; 132 __u32 ct, sw, rm, dup;
131 char *ptr, *rptr; 133 char *ptr, *rptr;
132 char tbuf[82], tdup[82]; 134 char tbuf[82], tdup[82];
133 #ifdef CONFIG_64BIT 135 #if (UTS_MACHINE == s390x)
134 char addr[22]; 136 char addr[22];
135 #else 137 #else
136 char addr[12]; 138 char addr[12];
@@ -147,8 +149,8 @@ void ctcmpc_dumpit(char *buf, int len)
147 149
148 for (ct = 0; ct < len; ct++, ptr++, rptr++) { 150 for (ct = 0; ct < len; ct++, ptr++, rptr++) {
149 if (sw == 0) { 151 if (sw == 0) {
150 #ifdef CONFIG_64BIT 152 #if (UTS_MACHINE == s390x)
151 sprintf(addr, "%16.16llx", (__u64)rptr); 153 sprintf(addr, "%16.16lx", (__u64)rptr);
152 #else 154 #else
153 sprintf(addr, "%8.8X", (__u32)rptr); 155 sprintf(addr, "%8.8X", (__u32)rptr);
154 #endif 156 #endif
@@ -162,8 +164,8 @@ void ctcmpc_dumpit(char *buf, int len)
162 if (sw == 8) 164 if (sw == 8)
163 strcat(bhex, " "); 165 strcat(bhex, " ");
164 166
165 #if CONFIG_64BIT 167 #if (UTS_MACHINE == s390x)
166 sprintf(tbuf, "%2.2llX", (__u64)*ptr); 168 sprintf(tbuf, "%2.2lX", (__u64)*ptr);
167 #else 169 #else
168 sprintf(tbuf, "%2.2X", (__u32)*ptr); 170 sprintf(tbuf, "%2.2X", (__u32)*ptr);
169 #endif 171 #endif
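The ctcmpc_dumpit hunks above contain a classic preprocessor pitfall: in "#if (UTS_MACHINE == s390x)" both identifiers are ordinarily undefined inside #if, so each is replaced by 0 and the test degenerates to "0 == 0", which is always taken; "#ifdef CONFIG_64BIT" tests definedness instead. The same hunks also show the matching format-string hazard ("%16.16lx" versus "%16.16llx" for a __u64). A small demonstration of the #if behavior — the macro names are taken from the hunk, but nothing here is kernel API:

    #include <stdio.h>

    /*
     * Undefined identifiers evaluate to 0 inside #if, so this test is
     * always true unless both macros are defined to distinct values.
     */
    #if (UTS_MACHINE == s390x)
    #define BRANCH "taken (0 == 0)"
    #else
    #define BRANCH "not taken"
    #endif

    int main(void)
    {
            printf("branch: %s\n", BRANCH);
            printf("pointer width here: %zu bits\n", sizeof(void *) * 8);
            return 0;
    }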
@@ -1367,6 +1369,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1367 struct mpc_group *grp; 1369 struct mpc_group *grp;
1368 struct channel *wch; 1370 struct channel *wch;
1369 1371
1372 BUG_ON(dev == NULL);
1370 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1373 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
1371 1374
1372 priv = dev->ml_priv; 1375 priv = dev->ml_priv;
@@ -1471,6 +1474,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1471 struct channel *wch; 1474 struct channel *wch;
1472 struct channel *rch; 1475 struct channel *rch;
1473 1476
1477 BUG_ON(dev == NULL);
1478
1474 priv = dev->ml_priv; 1479 priv = dev->ml_priv;
1475 grp = priv->mpcg; 1480 grp = priv->mpcg;
1476 wch = priv->channel[CTCM_WRITE]; 1481 wch = priv->channel[CTCM_WRITE];
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index bd1b1cc54ff..1fa07b0c11c 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_mpc.h
3 *
2 * Copyright IBM Corp. 2007 4 * Copyright IBM Corp. 2007
3 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) 5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
4 * 6 *
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 985b5dcbdac..8305319b2a8 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/ctcm_sysfs.c
3 *
2 * Copyright IBM Corp. 2007, 2007 4 * Copyright IBM Corp. 2007, 2007
3 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) 5 * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
4 * 6 *
@@ -11,7 +13,6 @@
11#define KMSG_COMPONENT "ctcm" 13#define KMSG_COMPONENT "ctcm"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 14#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 15
14#include <linux/device.h>
15#include <linux/sysfs.h> 16#include <linux/sysfs.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include "ctcm_main.h" 18#include "ctcm_main.h"
@@ -107,12 +108,10 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
107} 108}
108 109
109static ssize_t stats_show(struct device *dev, 110static ssize_t stats_show(struct device *dev,
110 struct device_attribute *attr, char *buf) 111 struct device_attribute *attr, char *buf)
111{ 112{
112 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
113 struct ctcm_priv *priv = dev_get_drvdata(dev); 113 struct ctcm_priv *priv = dev_get_drvdata(dev);
114 114 if (!priv)
115 if (!priv || gdev->state != CCWGROUP_ONLINE)
116 return -ENODEV; 115 return -ENODEV;
117 ctcm_print_statistics(priv); 116 ctcm_print_statistics(priv);
118 return sprintf(buf, "0\n"); 117 return sprintf(buf, "0\n");
@@ -160,7 +159,7 @@ static ssize_t ctcm_proto_store(struct device *dev,
160 return count; 159 return count;
161} 160}
162 161
163static const char *ctcm_type[] = { 162const char *ctcm_type[] = {
164 "not a channel", 163 "not a channel",
165 "CTC/A", 164 "CTC/A",
166 "FICON channel", 165 "FICON channel",
@@ -191,14 +190,34 @@ static struct attribute *ctcm_attr[] = {
191 &dev_attr_protocol.attr, 190 &dev_attr_protocol.attr,
192 &dev_attr_type.attr, 191 &dev_attr_type.attr,
193 &dev_attr_buffer.attr, 192 &dev_attr_buffer.attr,
194 &dev_attr_stats.attr,
195 NULL, 193 NULL,
196}; 194};
197 195
198static struct attribute_group ctcm_attr_group = { 196static struct attribute_group ctcm_attr_group = {
199 .attrs = ctcm_attr, 197 .attrs = ctcm_attr,
200}; 198};
201const struct attribute_group *ctcm_attr_groups[] = { 199
202 &ctcm_attr_group, 200int ctcm_add_attributes(struct device *dev)
203 NULL, 201{
204}; 202 int rc;
203
204 rc = device_create_file(dev, &dev_attr_stats);
205
206 return rc;
207}
208
209void ctcm_remove_attributes(struct device *dev)
210{
211 device_remove_file(dev, &dev_attr_stats);
212}
213
214int ctcm_add_files(struct device *dev)
215{
216 return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
217}
218
219void ctcm_remove_files(struct device *dev)
220{
221 sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
222}
223
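ctcm_sysfs.c above alternates between per-file registration (device_create_file()/device_remove_file() for the stats attribute) and table-driven registration of a whole attribute_group (sysfs_create_group()/sysfs_remove_group()). A minimal userspace sketch of the table-driven form, loosely modeled on struct device_attribute — all names here are illustrative:

    #include <stdio.h>

    /* A named attribute with a show callback. */
    struct attr {
            const char *name;
            int (*show)(char *buf, int len);
    };

    static int show_proto(char *buf, int len)  { return snprintf(buf, len, "0\n"); }
    static int show_buffer(char *buf, int len) { return snprintf(buf, len, "32768\n"); }

    /* Group form: one table, registered and removed as a unit. */
    static struct attr group[] = {
            { "protocol", show_proto },
            { "buffer",   show_buffer },
            { NULL, NULL },
    };

    int main(void)
    {
            char buf[32];

            for (struct attr *a = group; a->name; a++) {
                    a->show(buf, sizeof(buf));
                    printf("%s = %s", a->name, buf);
            }
            return 0;
    }

The group form keeps creation and removal symmetric in one call each, which is why the remove paths in these drivers shrink to a single sysfs_remove_group().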
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index c645dc9e98a..c3b8064a102 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -26,10 +26,12 @@
26#define KMSG_COMPONENT "lcs" 26#define KMSG_COMPONENT "lcs"
27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 27#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28 28
29#include <linux/kernel_stat.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/if.h> 31#include <linux/if.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
32#include <linux/etherdevice.h> 33#include <linux/etherdevice.h>
34#include <linux/trdevice.h>
33#include <linux/fddidevice.h> 35#include <linux/fddidevice.h>
34#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
35#include <linux/in.h> 37#include <linux/in.h>
@@ -49,7 +51,8 @@
49#include "lcs.h" 51#include "lcs.h"
50 52
51 53
52#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) 54#if !defined(CONFIG_NET_ETHERNET) && \
55 !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
53#error Cannot compile lcs.c without some net devices switched on. 56#error Cannot compile lcs.c without some net devices switched on.
54#endif 57#endif
55 58
@@ -282,7 +285,7 @@ lcs_setup_write_ccws(struct lcs_card *card)
282 285
283 LCS_DBF_TEXT(3, setup, "iwritccw"); 286 LCS_DBF_TEXT(3, setup, "iwritccw");
284 /* Setup write ccws. */ 287 /* Setup write ccws. */
285 memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); 288 memset(card->write.ccws, 0, sizeof(struct ccw1) * LCS_NUM_BUFFS + 1);
286 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { 289 for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
287 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; 290 card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
288 card->write.ccws[cnt].count = 0; 291 card->write.ccws[cnt].count = 0;
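The two columns of the memset line above differ only by parentheses, and the difference matters: without them, "+ 1" binds outside the multiplication, so the call clears one extra byte rather than one extra array element. A self-contained demonstration of the operator-precedence gap (struct layout and buffer count are stand-ins, not the real lcs definitions):

    #include <stdio.h>
    #include <stddef.h>

    struct ccw1 { unsigned char cmd; unsigned int count; void *cda; };
    #define LCS_NUM_BUFFS 32

    int main(void)
    {
            size_t no_parens   = sizeof(struct ccw1) * LCS_NUM_BUFFS + 1;
            size_t with_parens = sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1);

            /* no_parens adds one extra byte; with_parens one extra element */
            printf("without parens: %zu bytes\n", no_parens);
            printf("with parens:    %zu bytes\n", with_parens);
            return 0;
    }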
@@ -1164,7 +1167,10 @@ static void
1164lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) 1167lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
1165{ 1168{
1166 LCS_DBF_TEXT(4,trace, "getmac"); 1169 LCS_DBF_TEXT(4,trace, "getmac");
1167 ip_eth_mc_map(ipm, mac); 1170 if (dev->type == ARPHRD_IEEE802_TR)
1171 ip_tr_mc_map(ipm, mac);
1172 else
1173 ip_eth_mc_map(ipm, mac);
1168} 1174}
1169 1175
1170/** 1176/**
@@ -1393,6 +1399,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1393 int rc, index; 1399 int rc, index;
1394 int cstat, dstat; 1400 int cstat, dstat;
1395 1401
1402 kstat_cpu(smp_processor_id()).irqs[IOINT_LCS]++;
1396 if (lcs_check_irb_error(cdev, irb)) 1403 if (lcs_check_irb_error(cdev, irb))
1397 return; 1404 return;
1398 1405
@@ -1629,13 +1636,19 @@ lcs_startlan_auto(struct lcs_card *card)
1629 int rc; 1636 int rc;
1630 1637
1631 LCS_DBF_TEXT(2, trace, "strtauto"); 1638 LCS_DBF_TEXT(2, trace, "strtauto");
1632#ifdef CONFIG_ETHERNET 1639#ifdef CONFIG_NET_ETHERNET
1633 card->lan_type = LCS_FRAME_TYPE_ENET; 1640 card->lan_type = LCS_FRAME_TYPE_ENET;
1634 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1641 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1635 if (rc == 0) 1642 if (rc == 0)
1636 return 0; 1643 return 0;
1637 1644
1638#endif 1645#endif
1646#ifdef CONFIG_TR
1647 card->lan_type = LCS_FRAME_TYPE_TR;
1648 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
1649 if (rc == 0)
1650 return 0;
1651#endif
1639#ifdef CONFIG_FDDI 1652#ifdef CONFIG_FDDI
1640 card->lan_type = LCS_FRAME_TYPE_FDDI; 1653 card->lan_type = LCS_FRAME_TYPE_FDDI;
1641 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); 1654 rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
@@ -1959,7 +1972,7 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1959 1972
1960static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1961 1974
1962static const char *lcs_type[] = { 1975const char *lcs_type[] = {
1963 "not a channel", 1976 "not a channel",
1964 "2216 parallel", 1977 "2216 parallel",
1965 "2216 channel", 1978 "2216 channel",
@@ -2040,17 +2053,10 @@ static struct attribute * lcs_attrs[] = {
2040 &dev_attr_recover.attr, 2053 &dev_attr_recover.attr,
2041 NULL, 2054 NULL,
2042}; 2055};
2056
2043static struct attribute_group lcs_attr_group = { 2057static struct attribute_group lcs_attr_group = {
2044 .attrs = lcs_attrs, 2058 .attrs = lcs_attrs,
2045}; 2059};
2046static const struct attribute_group *lcs_attr_groups[] = {
2047 &lcs_attr_group,
2048 NULL,
2049};
2050static const struct device_type lcs_devtype = {
2051 .name = "lcs",
2052 .groups = lcs_attr_groups,
2053};
2054 2060
2055/** 2061/**
2056 * lcs_probe_device is called on establishing a new ccwgroup_device. 2062 * lcs_probe_device is called on establishing a new ccwgroup_device.
@@ -2059,6 +2065,7 @@ static int
2059lcs_probe_device(struct ccwgroup_device *ccwgdev) 2065lcs_probe_device(struct ccwgroup_device *ccwgdev)
2060{ 2066{
2061 struct lcs_card *card; 2067 struct lcs_card *card;
2068 int ret;
2062 2069
2063 if (!get_device(&ccwgdev->dev)) 2070 if (!get_device(&ccwgdev->dev))
2064 return -ENODEV; 2071 return -ENODEV;
@@ -2070,6 +2077,12 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2070 put_device(&ccwgdev->dev); 2077 put_device(&ccwgdev->dev);
2071 return -ENOMEM; 2078 return -ENOMEM;
2072 } 2079 }
2080 ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2081 if (ret) {
2082 lcs_free_card(card);
2083 put_device(&ccwgdev->dev);
2084 return ret;
2085 }
2073 dev_set_drvdata(&ccwgdev->dev, card); 2086 dev_set_drvdata(&ccwgdev->dev, card);
2074 ccwgdev->cdev[0]->handler = lcs_irq; 2087 ccwgdev->cdev[0]->handler = lcs_irq;
2075 ccwgdev->cdev[1]->handler = lcs_irq; 2088 ccwgdev->cdev[1]->handler = lcs_irq;
@@ -2078,9 +2091,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev)
2078 card->thread_start_mask = 0; 2091 card->thread_start_mask = 0;
2079 card->thread_allowed_mask = 0; 2092 card->thread_allowed_mask = 0;
2080 card->thread_running_mask = 0; 2093 card->thread_running_mask = 0;
2081 ccwgdev->dev.type = &lcs_devtype; 2094 return 0;
2082
2083 return 0;
2084} 2095}
2085 2096
2086static int 2097static int
@@ -2111,7 +2122,7 @@ static const struct net_device_ops lcs_mc_netdev_ops = {
2111 .ndo_stop = lcs_stop_device, 2122 .ndo_stop = lcs_stop_device,
2112 .ndo_get_stats = lcs_getstats, 2123 .ndo_get_stats = lcs_getstats,
2113 .ndo_start_xmit = lcs_start_xmit, 2124 .ndo_start_xmit = lcs_start_xmit,
2114 .ndo_set_rx_mode = lcs_set_multicast_list, 2125 .ndo_set_multicast_list = lcs_set_multicast_list,
2115}; 2126};
2116 2127
2117static int 2128static int
@@ -2157,12 +2168,18 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2157 goto netdev_out; 2168 goto netdev_out;
2158 } 2169 }
2159 switch (card->lan_type) { 2170 switch (card->lan_type) {
2160#ifdef CONFIG_ETHERNET 2171#ifdef CONFIG_NET_ETHERNET
2161 case LCS_FRAME_TYPE_ENET: 2172 case LCS_FRAME_TYPE_ENET:
2162 card->lan_type_trans = eth_type_trans; 2173 card->lan_type_trans = eth_type_trans;
2163 dev = alloc_etherdev(0); 2174 dev = alloc_etherdev(0);
2164 break; 2175 break;
2165#endif 2176#endif
2177#ifdef CONFIG_TR
2178 case LCS_FRAME_TYPE_TR:
2179 card->lan_type_trans = tr_type_trans;
2180 dev = alloc_trdev(0);
2181 break;
2182#endif
2166#ifdef CONFIG_FDDI 2183#ifdef CONFIG_FDDI
2167 case LCS_FRAME_TYPE_FDDI: 2184 case LCS_FRAME_TYPE_FDDI:
2168 card->lan_type_trans = fddi_type_trans; 2185 card->lan_type_trans = fddi_type_trans;
@@ -2225,7 +2242,7 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2225{ 2242{
2226 struct lcs_card *card; 2243 struct lcs_card *card;
2227 enum lcs_dev_states recover_state; 2244 enum lcs_dev_states recover_state;
2228 int ret = 0, ret2 = 0, ret3 = 0; 2245 int ret;
2229 2246
2230 LCS_DBF_TEXT(3, setup, "shtdndev"); 2247 LCS_DBF_TEXT(3, setup, "shtdndev");
2231 card = dev_get_drvdata(&ccwgdev->dev); 2248 card = dev_get_drvdata(&ccwgdev->dev);
@@ -2240,15 +2257,13 @@ __lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
2240 recover_state = card->state; 2257 recover_state = card->state;
2241 2258
2242 ret = lcs_stop_device(card->dev); 2259 ret = lcs_stop_device(card->dev);
2243 ret2 = ccw_device_set_offline(card->read.ccwdev); 2260 ret = ccw_device_set_offline(card->read.ccwdev);
2244 ret3 = ccw_device_set_offline(card->write.ccwdev); 2261 ret = ccw_device_set_offline(card->write.ccwdev);
2245 if (!ret)
2246 ret = (ret2) ? ret2 : ret3;
2247 if (ret)
2248 LCS_DBF_TEXT_(3, setup, "1err:%d", ret);
2249 if (recover_state == DEV_STATE_UP) { 2262 if (recover_state == DEV_STATE_UP) {
2250 card->state = DEV_STATE_RECOVER; 2263 card->state = DEV_STATE_RECOVER;
2251 } 2264 }
2265 if (ret)
2266 return ret;
2252 return 0; 2267 return 0;
2253} 2268}
2254 2269
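The __lcs_shutdown_device hunk above is about error aggregation: assigning each call's result to the same ret overwrites earlier failures, so only the last status survives, whereas the ret/ret2/ret3 form keeps the first nonzero status and still runs all three calls. A minimal userspace sketch of the keep-first-error pattern (the stub return values are made up):

    #include <stdio.h>

    static int stop_device(void)   { return 0; }
    static int set_offline_r(void) { return -5; }   /* pretend -EIO */
    static int set_offline_w(void) { return 0; }

    int main(void)
    {
            /* Run everything, but report the first failure. */
            int ret  = stop_device();
            int ret2 = set_offline_r();
            int ret3 = set_offline_w();

            if (!ret)
                    ret = ret2 ? ret2 : ret3;
            printf("aggregated rc: %d\n", ret);   /* -5, not 0 */
            return 0;
    }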
@@ -2308,9 +2323,9 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev)
2308 } 2323 }
2309 if (card->dev) 2324 if (card->dev)
2310 unregister_netdev(card->dev); 2325 unregister_netdev(card->dev);
2326 sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
2311 lcs_cleanup_card(card); 2327 lcs_cleanup_card(card);
2312 lcs_free_card(card); 2328 lcs_free_card(card);
2313 dev_set_drvdata(&ccwgdev->dev, NULL);
2314 put_device(&ccwgdev->dev); 2329 put_device(&ccwgdev->dev);
2315} 2330}
2316 2331
@@ -2384,7 +2399,6 @@ static struct ccw_driver lcs_ccw_driver = {
2384 .ids = lcs_ids, 2399 .ids = lcs_ids,
2385 .probe = ccwgroup_probe_ccwdev, 2400 .probe = ccwgroup_probe_ccwdev,
2386 .remove = ccwgroup_remove_ccwdev, 2401 .remove = ccwgroup_remove_ccwdev,
2387 .int_class = IRQIO_LCS,
2388}; 2402};
2389 2403
2390/** 2404/**
@@ -2395,7 +2409,9 @@ static struct ccwgroup_driver lcs_group_driver = {
2395 .owner = THIS_MODULE, 2409 .owner = THIS_MODULE,
2396 .name = "lcs", 2410 .name = "lcs",
2397 }, 2411 },
2398 .setup = lcs_probe_device, 2412 .max_slaves = 2,
2413 .driver_id = 0xD3C3E2,
2414 .probe = lcs_probe_device,
2399 .remove = lcs_remove_device, 2415 .remove = lcs_remove_device,
2400 .set_online = lcs_new_device, 2416 .set_online = lcs_new_device,
2401 .set_offline = lcs_shutdown_device, 2417 .set_offline = lcs_shutdown_device,
@@ -2406,24 +2422,30 @@ static struct ccwgroup_driver lcs_group_driver = {
2406 .restore = lcs_restore, 2422 .restore = lcs_restore,
2407}; 2423};
2408 2424
2409static ssize_t lcs_driver_group_store(struct device_driver *ddrv, 2425static ssize_t
2410 const char *buf, size_t count) 2426lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2427 size_t count)
2411{ 2428{
2412 int err; 2429 int err;
2413 err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); 2430 err = ccwgroup_create_from_string(lcs_root_dev,
2431 lcs_group_driver.driver_id,
2432 &lcs_ccw_driver, 2, buf);
2414 return err ? err : count; 2433 return err ? err : count;
2415} 2434}
2435
2416static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store); 2436static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2417 2437
2418static struct attribute *lcs_drv_attrs[] = { 2438static struct attribute *lcs_group_attrs[] = {
2419 &driver_attr_group.attr, 2439 &driver_attr_group.attr,
2420 NULL, 2440 NULL,
2421}; 2441};
2422static struct attribute_group lcs_drv_attr_group = { 2442
2423 .attrs = lcs_drv_attrs, 2443static struct attribute_group lcs_group_attr_group = {
2444 .attrs = lcs_group_attrs,
2424}; 2445};
2425static const struct attribute_group *lcs_drv_attr_groups[] = { 2446
2426 &lcs_drv_attr_group, 2447static const struct attribute_group *lcs_group_attr_groups[] = {
2448 &lcs_group_attr_group,
2427 NULL, 2449 NULL,
2428}; 2450};
2429 2451
@@ -2447,7 +2469,7 @@ __init lcs_init_module(void)
2447 rc = ccw_driver_register(&lcs_ccw_driver); 2469 rc = ccw_driver_register(&lcs_ccw_driver);
2448 if (rc) 2470 if (rc)
2449 goto ccw_err; 2471 goto ccw_err;
2450 lcs_group_driver.driver.groups = lcs_drv_attr_groups; 2472 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2451 rc = ccwgroup_driver_register(&lcs_group_driver); 2473 rc = ccwgroup_driver_register(&lcs_group_driver);
2452 if (rc) 2474 if (rc)
2453 goto ccwgroup_err; 2475 goto ccwgroup_err;
@@ -2473,6 +2495,8 @@ __exit lcs_cleanup_module(void)
2473{ 2495{
2474 pr_info("Terminating lcs module.\n"); 2496 pr_info("Terminating lcs module.\n");
2475 LCS_DBF_TEXT(0, trace, "cleanup"); 2497 LCS_DBF_TEXT(0, trace, "cleanup");
2498 driver_remove_file(&lcs_group_driver.driver,
2499 &driver_attr_group);
2476 ccwgroup_driver_unregister(&lcs_group_driver); 2500 ccwgroup_driver_unregister(&lcs_group_driver);
2477 ccw_driver_unregister(&lcs_ccw_driver); 2501 ccw_driver_unregister(&lcs_ccw_driver);
2478 root_device_unregister(lcs_root_dev); 2502 root_device_unregister(lcs_root_dev);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c87ea..3251333a23d 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,7 +63,6 @@
63 63
64#include <asm/io.h> 64#include <asm/io.h>
65#include <asm/uaccess.h> 65#include <asm/uaccess.h>
66#include <asm/ebcdic.h>
67 66
68#include <net/iucv/iucv.h> 67#include <net/iucv/iucv.h>
69#include "fsm.h" 68#include "fsm.h"
@@ -76,7 +75,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
76 * Debug Facility stuff 75 * Debug Facility stuff
77 */ 76 */
78#define IUCV_DBF_SETUP_NAME "iucv_setup" 77#define IUCV_DBF_SETUP_NAME "iucv_setup"
79#define IUCV_DBF_SETUP_LEN 64 78#define IUCV_DBF_SETUP_LEN 32
80#define IUCV_DBF_SETUP_PAGES 2 79#define IUCV_DBF_SETUP_PAGES 2
81#define IUCV_DBF_SETUP_NR_AREAS 1 80#define IUCV_DBF_SETUP_NR_AREAS 1
82#define IUCV_DBF_SETUP_LEVEL 3 81#define IUCV_DBF_SETUP_LEVEL 3
@@ -227,7 +226,6 @@ struct iucv_connection {
227 struct net_device *netdev; 226 struct net_device *netdev;
228 struct connection_profile prof; 227 struct connection_profile prof;
229 char userid[9]; 228 char userid[9];
230 char userdata[17];
231}; 229};
232 230
233/** 231/**
@@ -265,7 +263,7 @@ struct ll_header {
265}; 263};
266 264
267#define NETIUCV_HDRLEN (sizeof(struct ll_header)) 265#define NETIUCV_HDRLEN (sizeof(struct ll_header))
268#define NETIUCV_BUFSIZE_MAX 65537 266#define NETIUCV_BUFSIZE_MAX 32768
269#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX 267#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
270#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) 268#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
271#define NETIUCV_MTU_DEFAULT 9216 269#define NETIUCV_MTU_DEFAULT 9216
@@ -290,12 +288,7 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
290 return test_and_set_bit(0, &priv->tbusy); 288 return test_and_set_bit(0, &priv->tbusy);
291} 289}
292 290
293static u8 iucvMagic_ascii[16] = { 291static u8 iucvMagic[16] = {
294 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
295 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
296};
297
298static u8 iucvMagic_ebcdic[16] = {
299 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 292 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
300 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 293 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
301}; 294};
@@ -308,38 +301,18 @@ static u8 iucvMagic_ebcdic[16] = {
308 * 301 *
309 * @returns The printable string (static data!!) 302 * @returns The printable string (static data!!)
310 */ 303 */
311static char *netiucv_printname(char *name, int len) 304static char *netiucv_printname(char *name)
312{ 305{
313 static char tmp[17]; 306 static char tmp[9];
314 char *p = tmp; 307 char *p = tmp;
315 memcpy(tmp, name, len); 308 memcpy(tmp, name, 8);
316 tmp[len] = '\0'; 309 tmp[8] = '\0';
317 while (*p && ((p - tmp) < len) && (!isspace(*p))) 310 while (*p && (!isspace(*p)))
318 p++; 311 p++;
319 *p = '\0'; 312 *p = '\0';
320 return tmp; 313 return tmp;
321} 314}
322 315
323static char *netiucv_printuser(struct iucv_connection *conn)
324{
325 static char tmp_uid[9];
326 static char tmp_udat[17];
327 static char buf[100];
328
329 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
330 tmp_uid[8] = '\0';
331 tmp_udat[16] = '\0';
332 memcpy(tmp_uid, conn->userid, 8);
333 memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
334 memcpy(tmp_udat, conn->userdata, 16);
335 EBCASC(tmp_udat, 16);
336 memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
337 sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
338 return buf;
339 } else
340 return netiucv_printname(conn->userid, 8);
341}
342
343/** 316/**
344 * States of the interface statemachine. 317 * States of the interface statemachine.
345 */ 318 */
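netiucv_printname above copies a fixed-width, blank-padded field into a static buffer and cuts it at the first whitespace; one side hardcodes the 8-byte user-id width, the other generalizes the length for the 16-byte user-data variant. Note the static buffer: each call overwrites the previous result, which is why the callers copy it out immediately. A userspace sketch of the generalized form:

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Trim a blank-padded, fixed-width field; returns a static buffer. */
    static char *printname(const char *name, int len)
    {
            static char tmp[17];   /* fits up to a 16-byte field */
            char *p = tmp;

            memcpy(tmp, name, len);
            tmp[len] = '\0';
            while (*p && (p - tmp) < len && !isspace((unsigned char)*p))
                    p++;
            *p = '\0';
            return tmp;
    }

    int main(void)
    {
            printf("[%s]\n", printname("LNXGUEST        ", 16));  /* [LNXGUEST] */
            return 0;
    }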
@@ -590,18 +563,15 @@ static int netiucv_callback_connreq(struct iucv_path *path,
 {
 	struct iucv_connection *conn = path->private;
 	struct iucv_event ev;
-	static char tmp_user[9];
-	static char tmp_udat[17];
 	int rc;
 
+	if (memcmp(iucvMagic, ipuser, 16))
+		/* ipuser must match iucvMagic. */
+		return -EINVAL;
 	rc = -EINVAL;
-	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
-	memcpy(tmp_udat, ipuser, 16);
-	EBCASC(tmp_udat, 16);
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(conn, &iucv_connection_list, list) {
-		if (strncmp(ipvmid, conn->userid, 8) ||
-		    strncmp(ipuser, conn->userdata, 16))
+		if (strncmp(ipvmid, conn->userid, 8))
 			continue;
 		/* Found a matching connection for this path. */
 		conn->path = path;
@@ -610,8 +580,6 @@ static int netiucv_callback_connreq(struct iucv_path *path,
 		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 		rc = 0;
 	}
-	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
-		       tmp_user, netiucv_printname(tmp_udat, 16));
 	read_unlock_bh(&iucv_connection_rwlock);
 	return rc;
 }
@@ -848,7 +816,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
 	conn->path = path;
 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 	path->flags = 0;
-	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
+	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
 	if (rc) {
 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 		return;
@@ -886,7 +854,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 
 	IUCV_DBF_TEXT(trace, 3, __func__);
 	fsm_deltimer(&conn->timer);
-	iucv_path_sever(conn->path, conn->userdata);
+	iucv_path_sever(conn->path, NULL);
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 }
 
@@ -899,9 +867,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
 	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_deltimer(&conn->timer);
-	iucv_path_sever(conn->path, conn->userdata);
-	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
-		 "connection\n", netiucv_printuser(conn));
+	iucv_path_sever(conn->path, NULL);
+	dev_info(privptr->dev, "The peer interface of the IUCV device"
+		 " has closed the connection\n");
 	IUCV_DBF_TEXT(data, 2,
 		      "conn_action_connsever: Remote dropped connection\n");
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -918,6 +886,8 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 	IUCV_DBF_TEXT(trace, 3, __func__);
 
 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
+	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
+		       netdev->name, conn->userid);
 
 	/*
 	 * We must set the state before calling iucv_connect because the
@@ -927,11 +897,8 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 
 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
-	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
-		       netdev->name, netiucv_printuser(conn));
-
 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
-			       NULL, conn->userdata, conn);
+			       NULL, iucvMagic, conn);
 	switch (rc) {
 	case 0:
 		netdev->tx_queue_len = conn->path->msglim;
@@ -941,13 +908,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 	case 11:
 		dev_warn(privptr->dev,
 			"The IUCV device failed to connect to z/VM guest %s\n",
-			netiucv_printname(conn->userid, 8));
+			netiucv_printname(conn->userid));
 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 		break;
 	case 12:
 		dev_warn(privptr->dev,
 			"The IUCV device failed to connect to the peer on z/VM"
-			" guest %s\n", netiucv_printname(conn->userid, 8));
+			" guest %s\n", netiucv_printname(conn->userid));
 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 		break;
 	case 13:
@@ -960,7 +927,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
 		dev_err(privptr->dev,
 			"z/VM guest %s has too many IUCV connections"
 			" to connect with the IUCV device\n",
-			netiucv_printname(conn->userid, 8));
+			netiucv_printname(conn->userid));
 		fsm_newstate(fi, CONN_STATE_CONNERR);
 		break;
 	case 15:
@@ -1005,7 +972,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
 	netiucv_purge_skb_queue(&conn->collect_queue);
 	if (conn->path) {
 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
-		iucv_path_sever(conn->path, conn->userdata);
+		iucv_path_sever(conn->path, iucvMagic);
 		kfree(conn->path);
 		conn->path = NULL;
 	}
@@ -1123,8 +1090,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
 		fsm_newstate(fi, DEV_STATE_RUNNING);
 		dev_info(privptr->dev,
 			"The IUCV device has been connected"
-			" successfully to %s\n",
-			netiucv_printuser(privptr->conn));
+			" successfully to %s\n", privptr->conn->userid);
 		IUCV_DBF_TEXT(setup, 3,
 			      "connection is up and running\n");
 		break;
@@ -1486,72 +1452,45 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
 	struct netiucv_priv *priv = dev_get_drvdata(dev);
 
 	IUCV_DBF_TEXT(trace, 5, __func__);
-	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
 }
 
-static int netiucv_check_user(const char *buf, size_t count, char *username,
-			      char *userdata)
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t count)
 {
-	const char *p;
-	int i;
+	struct netiucv_priv *priv = dev_get_drvdata(dev);
+	struct net_device *ndev = priv->conn->netdev;
+	char *p;
+	char *tmp;
+	char username[9];
+	int i;
+	struct iucv_connection *cp;
 
-	p = strchr(buf, '.');
-	if ((p && ((count > 26) ||
-		   ((p - buf) > 8) ||
-		   (buf + count - p > 18))) ||
-	    (!p && (count > 9))) {
-		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	if (count > 9) {
+		IUCV_DBF_TEXT_(setup, 2,
+			       "%d is length of username\n", (int) count);
 		return -EINVAL;
 	}
 
-	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
-		if (isalnum(*p) || *p == '$') {
-			username[i] = toupper(*p);
+	tmp = strsep((char **) &buf, "\n");
+	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
+		if (isalnum(*p) || (*p == '$')) {
+			username[i]= toupper(*p);
 			continue;
 		}
-		if (*p == '\n')
+		if (*p == '\n') {
 			/* trailing lf, grr */
 			break;
+		}
 		IUCV_DBF_TEXT_(setup, 2,
-			       "conn_write: invalid character %02x\n", *p);
+			       "username: invalid character %c\n", *p);
 		return -EINVAL;
 	}
 	while (i < 8)
 		username[i++] = ' ';
 	username[8] = '\0';
 
-	if (*p == '.') {
-		p++;
-		for (i = 0; i < 16 && *p; i++, p++) {
-			if (*p == '\n')
-				break;
-			userdata[i] = toupper(*p);
-		}
-		while (i > 0 && i < 16)
-			userdata[i++] = ' ';
-	} else
-		memcpy(userdata, iucvMagic_ascii, 16);
-	userdata[16] = '\0';
-	ASCEBC(userdata, 16);
-
-	return 0;
-}
-
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
-			  const char *buf, size_t count)
-{
-	struct netiucv_priv *priv = dev_get_drvdata(dev);
-	struct net_device *ndev = priv->conn->netdev;
-	char username[9];
-	char userdata[17];
-	int rc;
-	struct iucv_connection *cp;
-
-	IUCV_DBF_TEXT(trace, 3, __func__);
-	rc = netiucv_check_user(buf, count, username, userdata);
-	if (rc)
-		return rc;
-
 	if (memcmp(username, priv->conn->userid, 9) &&
 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
 		/* username changed while the interface is active. */
@@ -1560,17 +1499,15 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
 	}
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(cp, &iucv_connection_list, list) {
-		if (!strncmp(username, cp->userid, 9) &&
-		    !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
+		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
 			read_unlock_bh(&iucv_connection_rwlock);
-			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
-				"already exists\n", netiucv_printuser(cp));
+			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
+				"to %s already exists\n", username);
 			return -EEXIST;
 		}
 	}
 	read_unlock_bh(&iucv_connection_rwlock);
 	memcpy(priv->conn->userid, username, 9);
-	memcpy(priv->conn->userdata, userdata, 17);
 	return count;
 }
 
@@ -1600,8 +1537,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
 	bs1 = simple_strtoul(buf, &e, 0);
 
 	if (e && (!isspace(*e))) {
-		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
-			*e);
+		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
 		return -EINVAL;
 	}
 	if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1854,11 +1790,26 @@ static struct attribute_group netiucv_stat_attr_group = {
 	.attrs = netiucv_stat_attrs,
 };
 
-static const struct attribute_group *netiucv_attr_groups[] = {
-	&netiucv_stat_attr_group,
-	&netiucv_attr_group,
-	NULL,
-};
+static int netiucv_add_files(struct device *dev)
+{
+	int ret;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
+	if (ret)
+		return ret;
+	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
+	if (ret)
+		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+	return ret;
+}
+
+static void netiucv_remove_files(struct device *dev)
+{
+	IUCV_DBF_TEXT(trace, 3, __func__);
+	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
+	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+}
 
 static int netiucv_register_device(struct net_device *ndev)
 {
@@ -1872,7 +1823,6 @@ static int netiucv_register_device(struct net_device *ndev)
 	dev_set_name(dev, "net%s", ndev->name);
 	dev->bus = &iucv_bus;
 	dev->parent = iucv_root;
-	dev->groups = netiucv_attr_groups;
 	/*
 	 * The release function could be called after the
 	 * module has been unloaded. It's _only_ task is to
@@ -1890,14 +1840,22 @@ static int netiucv_register_device(struct net_device *ndev)
 		put_device(dev);
 		return ret;
 	}
+	ret = netiucv_add_files(dev);
+	if (ret)
+		goto out_unreg;
 	priv->dev = dev;
 	dev_set_drvdata(dev, priv);
 	return 0;
+
+out_unreg:
+	device_unregister(dev);
+	return ret;
 }
 
 static void netiucv_unregister_device(struct device *dev)
 {
 	IUCV_DBF_TEXT(trace, 3, __func__);
+	netiucv_remove_files(dev);
 	device_unregister(dev);
 }
 
@@ -1906,8 +1864,7 @@ static void netiucv_unregister_device(struct device *dev)
  * Add it to the list of netiucv connections;
  */
 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
-						      char *username,
-						      char *userdata)
+						      char *username)
 {
 	struct iucv_connection *conn;
 
@@ -1936,8 +1893,6 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
 		fsm_settimer(conn->fsm, &conn->timer);
 		fsm_newstate(conn->fsm, CONN_STATE_INVALID);
 
-		if (userdata)
-			memcpy(conn->userdata, userdata, 17);
 		if (username) {
 			memcpy(conn->userid, username, 9);
 			fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1964,7 +1919,6 @@ out:
  */
 static void netiucv_remove_connection(struct iucv_connection *conn)
 {
-
 	IUCV_DBF_TEXT(trace, 3, __func__);
 	write_lock_bh(&iucv_connection_rwlock);
 	list_del_init(&conn->list);
@@ -1972,7 +1926,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
 	fsm_deltimer(&conn->timer);
 	netiucv_purge_skb_queue(&conn->collect_queue);
 	if (conn->path) {
-		iucv_path_sever(conn->path, conn->userdata);
+		iucv_path_sever(conn->path, iucvMagic);
 		kfree(conn->path);
 		conn->path = NULL;
 	}
@@ -2031,7 +1985,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
 /**
  * Allocate and initialize everything of a net device.
  */
-static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
+static struct net_device *netiucv_init_netdevice(char *username)
 {
 	struct netiucv_priv *privptr;
 	struct net_device *dev;
@@ -2040,8 +1994,6 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 			   netiucv_setup_netdevice);
 	if (!dev)
 		return NULL;
-	if (dev_alloc_name(dev, dev->name) < 0)
-		goto out_netdev;
 
 	privptr = netdev_priv(dev);
 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
@@ -2050,7 +2002,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
 	if (!privptr->fsm)
 		goto out_netdev;
 
-	privptr->conn = netiucv_new_connection(dev, username, userdata);
+	privptr->conn = netiucv_new_connection(dev, username);
 	if (!privptr->conn) {
 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
 		goto out_fsm;
@@ -2068,31 +2020,47 @@ out_netdev:
 static ssize_t conn_write(struct device_driver *drv,
 			  const char *buf, size_t count)
 {
+	const char *p;
 	char username[9];
-	char userdata[17];
-	int rc;
+	int i, rc;
 	struct net_device *dev;
 	struct netiucv_priv *priv;
 	struct iucv_connection *cp;
 
 	IUCV_DBF_TEXT(trace, 3, __func__);
-	rc = netiucv_check_user(buf, count, username, userdata);
-	if (rc)
-		return rc;
+	if (count>9) {
+		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+		return -EINVAL;
+	}
+
+	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
+		if (isalnum(*p) || *p == '$') {
+			username[i] = toupper(*p);
+			continue;
+		}
+		if (*p == '\n')
+			/* trailing lf, grr */
+			break;
+		IUCV_DBF_TEXT_(setup, 2,
+			       "conn_write: invalid character %c\n", *p);
+		return -EINVAL;
+	}
+	while (i < 8)
+		username[i++] = ' ';
+	username[8] = '\0';
 
 	read_lock_bh(&iucv_connection_rwlock);
 	list_for_each_entry(cp, &iucv_connection_list, list) {
-		if (!strncmp(username, cp->userid, 9) &&
-		    !strncmp(userdata, cp->userdata, 17)) {
+		if (!strncmp(username, cp->userid, 9)) {
 			read_unlock_bh(&iucv_connection_rwlock);
-			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
-				"already exists\n", netiucv_printuser(cp));
+			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
+				"to %s already exists\n", username);
 			return -EEXIST;
 		}
 	}
 	read_unlock_bh(&iucv_connection_rwlock);
 
-	dev = netiucv_init_netdevice(username, userdata);
+	dev = netiucv_init_netdevice(username);
 	if (!dev) {
 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
 		return -ENODEV;
@@ -2113,9 +2081,8 @@ static ssize_t conn_write(struct device_driver *drv,
 	if (rc)
 		goto out_unreg;
 
-	dev_info(priv->dev, "The IUCV interface to %s has been established "
-		 "successfully\n",
-		netiucv_printuser(priv->conn));
+	dev_info(priv->dev, "The IUCV interface to %s has been"
+		 " established successfully\n", netiucv_printname(username));
 
 	return count;
 
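
The sysfs hunks in this file trade the device-core-managed dev->groups mechanism for the hand-rolled netiucv_add_files()/netiucv_remove_files() pair, which has to unwind partially created groups itself. A minimal sketch of the groups-based pattern, with hypothetical names not taken from this driver:

/* Groups assigned before device_register() are created and removed by
 * the driver core itself, so no manual sysfs_create_group() or
 * sysfs_remove_group() error unwinding is needed. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
static const struct attribute_group example_group = {
	.attrs = example_attrs,
};
static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

static int example_register(struct device *dev)
{
	dev->groups = example_groups;	/* core creates/removes these */
	return device_register(dev);	/* no sysfs_create_group() calls */
}

A side effect of letting the core own the groups is that the attributes exist before the device uevent is emitted, so userspace never races against their creation.
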
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 480fbeab025..26a4110eeb2 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_core.h
+ *
  * Copyright IBM Corp. 2007
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  * Frank Pavlic <fpavlic@de.ibm.com>,
@@ -11,6 +13,8 @@
 
 #include <linux/if.h>
 #include <linux/if_arp.h>
+#include <linux/if_tr.h>
+#include <linux/trdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/ctype.h>
@@ -106,10 +110,6 @@ struct qeth_perf_stats {
 
 	unsigned int sc_dp_p;
 	unsigned int sc_p_dp;
-	/* qdio_cq_handler: number of times called, time spent in */
-	__u64 cq_start_time;
-	unsigned int cq_cnt;
-	unsigned int cq_time;
 	/* qdio_input_handler: number of times called, time spent in */
 	__u64 inbound_start_time;
 	unsigned int inbound_cnt;
@@ -213,7 +213,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
  */
 #define QETH_TX_TIMEOUT 100 * HZ
 #define QETH_RCD_TIMEOUT 60 * HZ
-#define QETH_RECLAIM_WORK_TIME HZ
 #define QETH_HEADER_SIZE 32
 #define QETH_MAX_PORTNO 15
 
@@ -232,7 +231,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_IN_BUF_COUNT_MAX 128
 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 #define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
-		((card)->qdio.in_buf_pool.buf_count / 2)
+	((card)->qdio.in_buf_pool.buf_count / 2)
 
 /* buffers we have to be behind before we get a PCI */
 #define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
@@ -261,7 +260,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 
 /* large receive scatter gather copy break */
 #define QETH_RX_SG_CB (PAGE_SIZE >> 1)
-#define QETH_RX_PULL_LEN 256
 
 struct qeth_hdr_layer3 {
 	__u8 id;
@@ -377,21 +375,6 @@ enum qeth_qdio_buffer_states {
 	 * outbound: filled by driver; owned by hardware in order to be sent
 	 */
 	QETH_QDIO_BUF_PRIMED,
-	/*
-	 * inbound: not applicable
-	 * outbound: identified to be pending in TPQ
-	 */
-	QETH_QDIO_BUF_PENDING,
-	/*
-	 * inbound: not applicable
-	 * outbound: found in completion queue
-	 */
-	QETH_QDIO_BUF_IN_CQ,
-	/*
-	 * inbound: not applicable
-	 * outbound: handled via transfer pending / completion queue
-	 */
-	QETH_QDIO_BUF_HANDLED_DELAYED,
 };
 
 enum qeth_qdio_info_states {
@@ -416,7 +399,6 @@ struct qeth_qdio_buffer {
 	struct qdio_buffer *buffer;
 	/* the buffer pool entry currently associated to this buffer */
 	struct qeth_buffer_pool_entry *pool_entry;
-	struct sk_buff *rx_skb;
 };
 
 struct qeth_qdio_q {
@@ -430,11 +412,8 @@ struct qeth_qdio_out_buffer {
 	atomic_t state;
 	int next_element_to_fill;
 	struct sk_buff_head skb_list;
+	struct list_head ctx_list;
 	int is_header[16];
-
-	struct qaob *aob;
-	struct qeth_qdio_out_q *q;
-	struct qeth_qdio_out_buffer *next_pending;
 };
 
 struct qeth_card;
@@ -447,8 +426,7 @@ enum qeth_out_q_states {
 
 struct qeth_qdio_out_q {
 	struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
-	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
-	struct qdio_outbuf_state *bufstates; /* convenience pointer */
+	struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
 	int queue_no;
 	struct qeth_card *card;
 	atomic_t state;
@@ -469,9 +447,7 @@ struct qeth_qdio_out_q {
 struct qeth_qdio_info {
 	atomic_t state;
 	/* input */
-	int no_in_queues;
 	struct qeth_qdio_q *in_q;
-	struct qeth_qdio_q *c_q;
 	struct qeth_qdio_buffer_pool in_buf_pool;
 	struct qeth_qdio_buffer_pool init_pool;
 	int in_buf_size;
@@ -479,7 +455,6 @@ struct qeth_qdio_info {
 	/* output */
 	int no_out_queues;
 	struct qeth_qdio_out_q **out_qs;
-	struct qdio_outbuf_state *out_bufstates;
 
 	/* priority queueing */
 	int do_prio_queueing;
@@ -551,12 +526,6 @@ enum qeth_cmd_buffer_state {
 	BUF_STATE_PROCESSED,
 };
 
-enum qeth_cq {
-	QETH_CQ_DISABLED = 0,
-	QETH_CQ_ENABLED = 1,
-	QETH_CQ_NOTAVAILABLE = 2,
-};
-
 struct qeth_ipato {
 	int enabled;
 	int invert4;
@@ -672,6 +641,8 @@ struct qeth_card_options {
 	struct qeth_ipa_info adp; /*Adapter parameters*/
 	struct qeth_routing_info route6;
 	struct qeth_ipa_info ipa6;
+	int broadcast_mode;
+	int macaddr_mode;
 	int fake_broadcast;
 	int add_hhlen;
 	int layer2;
@@ -679,8 +650,6 @@ struct qeth_card_options {
 	int rx_sg_cb;
 	enum qeth_ipa_isolation_modes isolation;
 	int sniffer;
-	enum qeth_cq cq;
-	char hsuid[9];
 };
 
 /*
@@ -705,16 +674,7 @@ struct qeth_discipline {
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
 	int (*recover)(void *ptr);
-	int (*setup) (struct ccwgroup_device *);
-	void (*remove) (struct ccwgroup_device *);
-	int (*set_online) (struct ccwgroup_device *);
-	int (*set_offline) (struct ccwgroup_device *);
-	void (*shutdown)(struct ccwgroup_device *);
-	int (*prepare) (struct ccwgroup_device *);
-	void (*complete) (struct ccwgroup_device *);
-	int (*freeze)(struct ccwgroup_device *);
-	int (*thaw) (struct ccwgroup_device *);
-	int (*restore)(struct ccwgroup_device *);
+	struct ccwgroup_driver *ccwgdriver;
 };
 
 struct qeth_vlan_vid {
@@ -778,7 +738,7 @@ struct qeth_card {
 	struct qeth_perf_stats perf_stats;
 	int read_or_write_problem;
 	struct qeth_osn_info osn_info;
-	struct qeth_discipline *discipline;
+	struct qeth_discipline discipline;
 	atomic_t force_alloc_skb;
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
@@ -787,8 +747,6 @@ struct qeth_card {
 	struct mutex discipline_mutex;
 	struct napi_struct napi;
 	struct qeth_rx rx;
-	struct delayed_work buffer_reclaim_work;
-	int reclaim_index;
 };
 
 struct qeth_card_list_struct {
@@ -844,16 +802,16 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card,
 	return card->info.diagass_support & (__u32)cmd;
 }
 
-extern struct qeth_discipline qeth_l2_discipline;
-extern struct qeth_discipline qeth_l3_discipline;
-extern const struct attribute_group *qeth_generic_attr_groups[];
-extern const struct attribute_group *qeth_osn_attr_groups[];
-
+extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
+extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
 const char *qeth_get_cardname_short(struct qeth_card *);
 int qeth_realloc_buffer_pool(struct qeth_card *, int);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
-void qeth_buffer_reclaim_work(struct work_struct *);
+int qeth_core_create_device_attributes(struct device *);
+void qeth_core_remove_device_attributes(struct device *);
+int qeth_core_create_osn_attributes(struct device *);
+void qeth_core_remove_osn_attributes(struct device *);
 
 /* exports for qeth discipline device drivers */
 extern struct qeth_card_list_struct qeth_core_card_list;
@@ -882,7 +840,7 @@ int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
 		unsigned int, const char *);
 void qeth_queue_input_buffer(struct qeth_card *, int);
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
-	struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
+	struct qdio_buffer *, struct qdio_buffer_element **, int *,
 	struct qeth_hdr **);
 void qeth_schedule_recovery(struct qeth_card *);
 void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
@@ -908,7 +866,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
 struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 int qeth_mdio_read(struct net_device *, int, int);
 int qeth_snmp_command(struct qeth_card *, char __user *);
-int qeth_query_oat_command(struct qeth_card *, char __user *);
 struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
 int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
 		unsigned long);
@@ -930,10 +887,8 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 int qeth_set_access_ctrl_online(struct qeth_card *card);
 int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
-int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
-void qeth_trace_features(struct qeth_card *);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
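
The struct qeth_discipline hunk above folds the per-discipline ccwgroup callbacks (set_online, set_offline, freeze, thaw, and so on) into a single ccwgdriver back-pointer, while card->discipline changes from a pointer into an embedded struct. The pointer-plus-callbacks variant is plain vtable dispatch; a simplified sketch with hypothetical names, not the actual qeth API:

/* Hypothetical sketch of the discipline indirection: the core picks a
 * layer-2 or layer-3 implementation at runtime and calls through the
 * pointer without knowing which module provides it. */
struct example_discipline {
	int (*set_online)(struct ccwgroup_device *gdev);
	int (*set_offline)(struct ccwgroup_device *gdev);
	int (*recover)(void *ptr);
};

struct example_card {
	struct example_discipline *discipline;	/* l2 or l3, loaded on demand */
};

static int example_set_online(struct example_card *card,
			      struct ccwgroup_device *gdev)
{
	if (!card->discipline)
		return -ENODEV;	/* no discipline module loaded yet */
	return card->discipline->set_online(gdev);
}
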
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 638a57f4d8a..4550573c25e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_core_main.c
+ *
  * Copyright IBM Corp. 2007, 2009
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  * Frank Pavlic <fpavlic@de.ibm.com>,
@@ -19,12 +21,10 @@
 #include <linux/mii.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
-#include <net/iucv/af_iucv.h>
 
 #include <asm/ebcdic.h>
 #include <asm/io.h>
 #include <asm/sysinfo.h>
-#include <asm/compat.h>
 
 #include "qeth_core.h"
 
@@ -44,12 +44,10 @@ struct qeth_card_list_struct qeth_core_card_list;
 EXPORT_SYMBOL_GPL(qeth_core_card_list);
 struct kmem_cache *qeth_core_header_cache;
 EXPORT_SYMBOL_GPL(qeth_core_header_cache);
-static struct kmem_cache *qeth_qdio_outbuf_cache;
 
 static struct device *qeth_core_root_dev;
 static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
 static struct lock_class_key qdio_out_skb_queue_key;
-static struct mutex qeth_mod_mutex;
 
 static void qeth_send_control_data_cb(struct qeth_channel *,
 		struct qeth_cmd_buffer *);
@@ -58,28 +56,20 @@ static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
 static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
 static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
-static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum iucv_tx_notify notification);
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum qeth_qdio_buffer_states newbufstate);
-static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
 
 static inline const char *qeth_get_cardname(struct qeth_card *card)
 {
 	if (card->info.guestlan) {
 		switch (card->info.type) {
 		case QETH_CARD_TYPE_OSD:
-			return " Virtual NIC QDIO";
+			return " Guest LAN QDIO";
 		case QETH_CARD_TYPE_IQD:
-			return " Virtual NIC Hiper";
+			return " Guest LAN Hiper";
 		case QETH_CARD_TYPE_OSM:
-			return " Virtual NIC QDIO - OSM";
+			return " Guest LAN QDIO - OSM";
 		case QETH_CARD_TYPE_OSX:
-			return " Virtual NIC QDIO - OSX";
+			return " Guest LAN QDIO - OSX";
 		default:
 			return " unknown";
 		}
@@ -108,13 +98,13 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
 	if (card->info.guestlan) {
 		switch (card->info.type) {
 		case QETH_CARD_TYPE_OSD:
-			return "Virt.NIC QDIO";
+			return "GuestLAN QDIO";
 		case QETH_CARD_TYPE_IQD:
-			return "Virt.NIC Hiper";
+			return "GuestLAN Hiper";
 		case QETH_CARD_TYPE_OSM:
-			return "Virt.NIC OSM";
+			return "GuestLAN OSM";
 		case QETH_CARD_TYPE_OSX:
-			return "Virt.NIC OSX";
+			return "GuestLAN OSX";
 		default:
 			return "unknown";
 		}
@@ -209,7 +199,7 @@ static int qeth_alloc_buffer_pool(struct qeth_card *card)
 
 	QETH_CARD_TEXT(card, 5, "alocpool");
 	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
+		pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
 		if (!pool_entry) {
 			qeth_free_buffer_pool(card);
 			return -ENOMEM;
@@ -249,205 +239,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
 }
 EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
 
-static inline int qeth_cq_init(struct qeth_card *card)
-{
-	int rc;
-
-	if (card->options.cq == QETH_CQ_ENABLED) {
-		QETH_DBF_TEXT(SETUP, 2, "cqinit");
-		memset(card->qdio.c_q->qdio_bufs, 0,
-		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
-		card->qdio.c_q->next_buf_to_init = 127;
-		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
-			     card->qdio.no_in_queues - 1, 0,
-			     127);
-		if (rc) {
-			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-			goto out;
-		}
-	}
-	rc = 0;
-out:
-	return rc;
-}
-
-static inline int qeth_alloc_cq(struct qeth_card *card)
-{
-	int rc;
-
-	if (card->options.cq == QETH_CQ_ENABLED) {
-		int i;
-		struct qdio_outbuf_state *outbuf_states;
-
-		QETH_DBF_TEXT(SETUP, 2, "cqon");
-		card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
-					 GFP_KERNEL);
-		if (!card->qdio.c_q) {
-			rc = -1;
-			goto kmsg_out;
-		}
-		QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
-
-		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
-			card->qdio.c_q->bufs[i].buffer =
-				&card->qdio.c_q->qdio_bufs[i];
-		}
-
-		card->qdio.no_in_queues = 2;
-
-		card->qdio.out_bufstates = (struct qdio_outbuf_state *)
-			kzalloc(card->qdio.no_out_queues *
-				QDIO_MAX_BUFFERS_PER_Q *
-				sizeof(struct qdio_outbuf_state), GFP_KERNEL);
-		outbuf_states = card->qdio.out_bufstates;
-		if (outbuf_states == NULL) {
-			rc = -1;
-			goto free_cq_out;
-		}
-		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			card->qdio.out_qs[i]->bufstates = outbuf_states;
-			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
-		}
-	} else {
-		QETH_DBF_TEXT(SETUP, 2, "nocq");
-		card->qdio.c_q = NULL;
-		card->qdio.no_in_queues = 1;
-	}
-	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
-	rc = 0;
-out:
-	return rc;
-free_cq_out:
-	kfree(card->qdio.c_q);
-	card->qdio.c_q = NULL;
-kmsg_out:
-	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
-	goto out;
-}
-
-static inline void qeth_free_cq(struct qeth_card *card)
-{
-	if (card->qdio.c_q) {
-		--card->qdio.no_in_queues;
-		kfree(card->qdio.c_q);
-		card->qdio.c_q = NULL;
-	}
-	kfree(card->qdio.out_bufstates);
-	card->qdio.out_bufstates = NULL;
-}
-
-static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
-	int delayed) {
-	enum iucv_tx_notify n;
-
-	switch (sbalf15) {
-	case 0:
-		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
-		break;
-	case 4:
-	case 16:
-	case 17:
-	case 18:
-		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
-			TX_NOTIFY_UNREACHABLE;
-		break;
-	default:
-		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
-			TX_NOTIFY_GENERALERROR;
-		break;
-	}
-
-	return n;
-}
-
-static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
-	int bidx, int forced_cleanup)
-{
-	if (q->card->options.cq != QETH_CQ_ENABLED)
-		return;
-
-	if (q->bufs[bidx]->next_pending != NULL) {
-		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
-		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
-
-		while (c) {
-			if (forced_cleanup ||
-			    atomic_read(&c->state) ==
-			      QETH_QDIO_BUF_HANDLED_DELAYED) {
-				struct qeth_qdio_out_buffer *f = c;
-				QETH_CARD_TEXT(f->q->card, 5, "fp");
-				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
-				/* release here to avoid interleaving between
-				   outbound tasklet and inbound tasklet
-				   regarding notifications and lifecycle */
-				qeth_release_skbs(c);
-
-				c = f->next_pending;
-				WARN_ON_ONCE(head->next_pending != f);
-				head->next_pending = c;
-				kmem_cache_free(qeth_qdio_outbuf_cache, f);
-			} else {
-				head = c;
-				c = c->next_pending;
-			}
-
-		}
-	}
-	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
-					QETH_QDIO_BUF_HANDLED_DELAYED)) {
-		/* for recovery situations */
-		q->bufs[bidx]->aob = q->bufstates[bidx].aob;
-		qeth_init_qdio_out_buf(q, bidx);
-		QETH_CARD_TEXT(q->card, 2, "clprecov");
-	}
-}
-
-
-static inline void qeth_qdio_handle_aob(struct qeth_card *card,
-		unsigned long phys_aob_addr) {
-	struct qaob *aob;
-	struct qeth_qdio_out_buffer *buffer;
-	enum iucv_tx_notify notification;
-
-	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
-	QETH_CARD_TEXT(card, 5, "haob");
-	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
-	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
-	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
-
-	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
-			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
-		notification = TX_NOTIFY_OK;
-	} else {
-		WARN_ON_ONCE(atomic_read(&buffer->state) !=
-			QETH_QDIO_BUF_PENDING);
-		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
-		notification = TX_NOTIFY_DELAYED_OK;
-	}
-
-	if (aob->aorc != 0) {
-		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
-		notification = qeth_compute_cq_notification(aob->aorc, 1);
-	}
-	qeth_notify_skbs(buffer->q, buffer, notification);
-
-	buffer->aob = NULL;
-	qeth_clear_output_buffer(buffer->q, buffer,
-				 QETH_QDIO_BUF_HANDLED_DELAYED);
-
-	/* from here on: do not touch buffer anymore */
-	qdio_release_aob(aob);
-}
-
-static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
-{
-	return card->options.cq == QETH_CQ_ENABLED &&
-	    card->qdio.c_q != NULL &&
-	    queue != 0 &&
-	    queue == card->qdio.no_in_queues - 1;
-}
-
-
 static int qeth_issue_next_read(struct qeth_card *card)
 {
 	int rc;
@@ -488,7 +279,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
 		atomic_set(&reply->refcnt, 1);
 		atomic_set(&reply->received, 0);
 		reply->card = card;
-	}
+	};
 	return reply;
 }
 
@@ -676,7 +467,6 @@ void qeth_release_buffer(struct qeth_channel *channel,
 	iob->callback = qeth_send_control_data_cb;
 	iob->rc = 0;
 	spin_unlock_irqrestore(&channel->iob_lock, flags);
-	wake_up(&channel->wait_q);
 }
 EXPORT_SYMBOL_GPL(qeth_release_buffer);
 
@@ -799,7 +589,7 @@ static int qeth_setup_channel(struct qeth_channel *channel)
 	QETH_DBF_TEXT(SETUP, 2, "setupch");
 	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
 		channel->iob[cnt].data =
-			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+			kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
 		if (channel->iob[cnt].data == NULL)
 			break;
 		channel->iob[cnt].state = BUF_STATE_FREE;
@@ -1093,74 +883,22 @@ out:
 	return;
 }
 
-static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
-		struct qeth_qdio_out_buffer *buf,
-		enum iucv_tx_notify notification)
-{
-	struct sk_buff *skb;
-
-	if (skb_queue_empty(&buf->skb_list))
-		goto out;
-	skb = skb_peek(&buf->skb_list);
-	while (skb) {
-		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
-		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
-		if (skb->protocol == ETH_P_AF_IUCV) {
-			if (skb->sk) {
-				struct iucv_sock *iucv = iucv_sk(skb->sk);
-				iucv->sk_txnotify(skb, notification);
-			}
-		}
-		if (skb_queue_is_last(&buf->skb_list, skb))
-			skb = NULL;
-		else
-			skb = skb_queue_next(&buf->skb_list, skb);
-	}
-out:
-	return;
-}
-
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf)
 {
+	int i;
 	struct sk_buff *skb;
-	struct iucv_sock *iucv;
-	int notify_general_error = 0;
-
-	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
-		notify_general_error = 1;
 
-	/* release may never happen from within CQ tasklet scope */
-	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+	/* is PCI flag set on buffer? */
+	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+		atomic_dec(&queue->set_pci_flags_count);
 
 	skb = skb_dequeue(&buf->skb_list);
 	while (skb) {
-		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
-		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
-		if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
-			if (skb->sk) {
-				iucv = iucv_sk(skb->sk);
-				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
-			}
-		}
 		atomic_dec(&skb->users);
 		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
 	}
-}
-
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf,
-		enum qeth_qdio_buffer_states newbufstate)
-{
-	int i;
-
-	/* is PCI flag set on buffer? */
-	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
-		atomic_dec(&queue->set_pci_flags_count);
-
-	if (newbufstate == QETH_QDIO_BUF_EMPTY) {
-		qeth_release_skbs(buf);
-	}
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
 			kmem_cache_free(qeth_core_header_cache,
@@ -1174,36 +912,21 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1174 buf->buffer->element[15].eflags = 0; 912 buf->buffer->element[15].eflags = 0;
1175 buf->buffer->element[15].sflags = 0; 913 buf->buffer->element[15].sflags = 0;
1176 buf->next_element_to_fill = 0; 914 buf->next_element_to_fill = 0;
1177 atomic_set(&buf->state, newbufstate); 915 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1178}
1179
1180static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
1181{
1182 int j;
1183
1184 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1185 if (!q->bufs[j])
1186 continue;
1187 qeth_cleanup_handled_pending(q, j, 1);
1188 qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
1189 if (free) {
1190 kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1191 q->bufs[j] = NULL;
1192 }
1193 }
1194} 916}
1195 917
1196void qeth_clear_qdio_buffers(struct qeth_card *card) 918void qeth_clear_qdio_buffers(struct qeth_card *card)
1197{ 919{
1198 int i; 920 int i, j;
1199 921
1200 QETH_CARD_TEXT(card, 2, "clearqdbf"); 922 QETH_CARD_TEXT(card, 2, "clearqdbf");
1201 /* clear outbound buffers to free skbs */ 923 /* clear outbound buffers to free skbs */
1202 for (i = 0; i < card->qdio.no_out_queues; ++i) { 924 for (i = 0; i < card->qdio.no_out_queues; ++i)
1203 if (card->qdio.out_qs[i]) { 925 if (card->qdio.out_qs[i]) {
1204 qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); 926 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
927 qeth_clear_output_buffer(card->qdio.out_qs[i],
928 &card->qdio.out_qs[i]->bufs[j]);
1205 } 929 }
1206 }
1207} 930}
1208EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); 931EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
1209 932
@@ -1227,11 +950,6 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
1227 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == 950 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
1228 QETH_QDIO_UNINITIALIZED) 951 QETH_QDIO_UNINITIALIZED)
1229 return; 952 return;
1230
1231 qeth_free_cq(card);
1232 cancel_delayed_work_sync(&card->buffer_reclaim_work);
1233 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
1234 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
1235 kfree(card->qdio.in_q); 953 kfree(card->qdio.in_q);
1236 card->qdio.in_q = NULL; 954 card->qdio.in_q = NULL;
1237 /* inbound buffer pool */ 955 /* inbound buffer pool */
@@ -1239,7 +957,9 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
1239 /* free outbound qdio_qs */ 957 /* free outbound qdio_qs */
1240 if (card->qdio.out_qs) { 958 if (card->qdio.out_qs) {
1241 for (i = 0; i < card->qdio.no_out_queues; ++i) { 959 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1242 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 960 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
961 qeth_clear_output_buffer(card->qdio.out_qs[i],
962 &card->qdio.out_qs[i]->bufs[j]);
1243 kfree(card->qdio.out_qs[i]); 963 kfree(card->qdio.out_qs[i]);
1244 } 964 }
1245 kfree(card->qdio.out_qs); 965 kfree(card->qdio.out_qs);
@@ -1256,30 +976,7 @@ static void qeth_clean_channel(struct qeth_channel *channel)
1256 kfree(channel->iob[cnt].data); 976 kfree(channel->iob[cnt].data);
1257} 977}
1258 978
1259static void qeth_set_single_write_queues(struct qeth_card *card) 979static void qeth_get_channel_path_desc(struct qeth_card *card)
1260{
1261 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1262 (card->qdio.no_out_queues == 4))
1263 qeth_free_qdio_buffers(card);
1264
1265 card->qdio.no_out_queues = 1;
1266 if (card->qdio.default_out_queue != 0)
1267 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1268
1269 card->qdio.default_out_queue = 0;
1270}
1271
1272static void qeth_set_multiple_write_queues(struct qeth_card *card)
1273{
1274 if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
1275 (card->qdio.no_out_queues == 1)) {
1276 qeth_free_qdio_buffers(card);
1277 card->qdio.default_out_queue = 2;
1278 }
1279 card->qdio.no_out_queues = 4;
1280}
1281
1282static void qeth_update_from_chp_desc(struct qeth_card *card)
1283{ 980{
1284 struct ccw_device *ccwdev; 981 struct ccw_device *ccwdev;
1285 struct channelPath_dsc { 982 struct channelPath_dsc {
@@ -1296,23 +993,36 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
1296 QETH_DBF_TEXT(SETUP, 2, "chp_desc"); 993 QETH_DBF_TEXT(SETUP, 2, "chp_desc");
1297 994
1298 ccwdev = card->data.ccwdev; 995 ccwdev = card->data.ccwdev;
1299 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); 996 chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
1300 if (!chp_dsc) 997 if (chp_dsc != NULL) {
1301 goto out; 998 /* CHPP field bit 6 == 1 -> single queue */
1302 999 if ((chp_dsc->chpp & 0x02) == 0x02) {
1303 card->info.func_level = 0x4100 + chp_dsc->desc; 1000 if ((atomic_read(&card->qdio.state) !=
1304 if (card->info.type == QETH_CARD_TYPE_IQD) 1001 QETH_QDIO_UNINITIALIZED) &&
1305 goto out; 1002 (card->qdio.no_out_queues == 4))
1306 1003 /* change from 4 to 1 outbound queues */
1307 /* CHPP field bit 6 == 1 -> single queue */ 1004 qeth_free_qdio_buffers(card);
1308 if ((chp_dsc->chpp & 0x02) == 0x02) 1005 card->qdio.no_out_queues = 1;
1309 qeth_set_single_write_queues(card); 1006 if (card->qdio.default_out_queue != 0)
1310 else 1007 dev_info(&card->gdev->dev,
1311 qeth_set_multiple_write_queues(card); 1008 "Priority Queueing not supported\n");
1312out: 1009 card->qdio.default_out_queue = 0;
1313 kfree(chp_dsc); 1010 } else {
1011 if ((atomic_read(&card->qdio.state) !=
1012 QETH_QDIO_UNINITIALIZED) &&
1013 (card->qdio.no_out_queues == 1)) {
1014 /* change from 1 to 4 outbound queues */
1015 qeth_free_qdio_buffers(card);
1016 card->qdio.default_out_queue = 2;
1017 }
1018 card->qdio.no_out_queues = 4;
1019 }
1020 card->info.func_level = 0x4100 + chp_dsc->desc;
1021 kfree(chp_dsc);
1022 }
1314 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); 1023 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
1315 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); 1024 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
1025 return;
1316} 1026}
1317 1027
1318static void qeth_init_qdio_info(struct qeth_card *card) 1028static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1334,12 +1044,13 @@ static void qeth_set_intial_options(struct qeth_card *card)
1334{ 1044{
1335 card->options.route4.type = NO_ROUTER; 1045 card->options.route4.type = NO_ROUTER;
1336 card->options.route6.type = NO_ROUTER; 1046 card->options.route6.type = NO_ROUTER;
1047 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
1048 card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
1337 card->options.fake_broadcast = 0; 1049 card->options.fake_broadcast = 0;
1338 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1050 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1339 card->options.performance_stats = 0; 1051 card->options.performance_stats = 0;
1340 card->options.rx_sg_cb = QETH_RX_SG_CB; 1052 card->options.rx_sg_cb = QETH_RX_SG_CB;
1341 card->options.isolation = ISOLATION_MODE_NONE; 1053 card->options.isolation = ISOLATION_MODE_NONE;
1342 card->options.cq = QETH_CQ_DISABLED;
1343} 1054}
1344 1055
1345static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1056static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -1359,7 +1070,6 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
 
 static void qeth_start_kernel_thread(struct work_struct *work)
 {
-	struct task_struct *ts;
 	struct qeth_card *card = container_of(work, struct qeth_card,
 					kernel_thread_starter);
 	QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1367,15 +1077,9 @@ static void qeth_start_kernel_thread(struct work_struct *work)
 	if (card->read.state != CH_STATE_UP &&
 	    card->write.state != CH_STATE_UP)
 		return;
-	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
-		ts = kthread_run(card->discipline->recover, (void *)card,
+	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
+		kthread_run(card->discipline.recover, (void *) card,
 				"qeth_recover");
-		if (IS_ERR(ts)) {
-			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
-			qeth_clear_thread_running_bit(card,
-				QETH_RECOVER_THREAD);
-		}
-	}
 }
 
 static int qeth_setup_card(struct qeth_card *card)
@@ -1415,7 +1119,6 @@ static int qeth_setup_card(struct qeth_card *card)
 	card->ipato.invert6 = 0;
 	/* init QDIO stuff */
 	qeth_init_qdio_info(card);
-	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
 	return 0;
 }
 
@@ -1437,7 +1140,7 @@ static struct qeth_card *qeth_alloc_card(void)
 	if (!card)
 		goto out;
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
-	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
 	if (!card->ip_tbd_list) {
 		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
 		goto out_card;
@@ -1477,10 +1180,9 @@ static int qeth_determine_card_type(struct qeth_card *card)
 		card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
 		card->qdio.no_out_queues =
 			known_devices[i][QETH_QUEUE_NO_IND];
-		card->qdio.no_in_queues = 1;
 		card->info.is_multicast_different =
 			known_devices[i][QETH_MULTICAST_IND];
-		qeth_update_from_chp_desc(card);
+		qeth_get_channel_path_desc(card);
 		return 0;
 	}
 	i++;
@@ -1675,8 +1377,7 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
 {
 	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
 
-	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
-	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
+	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
 		card->info.blkt.time_total = 250;
 		card->info.blkt.inter_packet = 5;
 		card->info.blkt.inter_packet_jumbo = 15;
@@ -2036,7 +1737,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 			if (time_after(jiffies, timeout))
 				goto time_err;
 			cpu_relax();
-		}
+		};
 	}
 
 	if (reply->rc == -EIO)
@@ -2279,6 +1980,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
 		unsigned long data)
 {
 	struct qeth_cmd_buffer *iob;
+	int rc = 0;
 
 	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
 
@@ -2294,7 +1996,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
 		iob->rc = -EMLINK;
 	}
 	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
-	return 0;
+	return rc;
 }
 
 static int qeth_ulp_setup(struct qeth_card *card)
@@ -2325,37 +2027,6 @@ static int qeth_ulp_setup(struct qeth_card *card)
 	return rc;
 }
 
-static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
-{
-	int rc;
-	struct qeth_qdio_out_buffer *newbuf;
-
-	rc = 0;
-	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
-	if (!newbuf) {
-		rc = -ENOMEM;
-		goto out;
-	}
-	newbuf->buffer = &q->qdio_bufs[bidx];
-	skb_queue_head_init(&newbuf->skb_list);
-	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
-	newbuf->q = q;
-	newbuf->aob = NULL;
-	newbuf->next_pending = q->bufs[bidx];
-	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
-	q->bufs[bidx] = newbuf;
-	if (q->bufstates) {
-		q->bufstates[bidx].user = newbuf;
-		QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
-		QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
-		QETH_CARD_TEXT_(q->card, 2, "%lx",
-				(long) newbuf->next_pending);
-	}
-out:
-	return rc;
-}
-
-
 static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;
@@ -2366,63 +2037,52 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
 		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
 		return 0;
 
-	card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
+	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
 				  GFP_KERNEL);
 	if (!card->qdio.in_q)
 		goto out_nomem;
 	QETH_DBF_TEXT(SETUP, 2, "inq");
 	QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
 	memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
 	/* give inbound qeth_qdio_buffers their qdio_buffers */
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
 		card->qdio.in_q->bufs[i].buffer =
 			&card->qdio.in_q->qdio_bufs[i];
-		card->qdio.in_q->bufs[i].rx_skb = NULL;
-	}
 	/* inbound buffer pool */
 	if (qeth_alloc_buffer_pool(card))
 		goto out_freeinq;
-
 	/* outbound */
 	card->qdio.out_qs =
-		kzalloc(card->qdio.no_out_queues *
+		kmalloc(card->qdio.no_out_queues *
 			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
 	if (!card->qdio.out_qs)
 		goto out_freepool;
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
-		card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
+		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
 					       GFP_KERNEL);
 		if (!card->qdio.out_qs[i])
 			goto out_freeoutq;
 		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
 		QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+		memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
 		card->qdio.out_qs[i]->queue_no = i;
 		/* give outbound qeth_qdio_buffers their qdio_buffers */
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
-			WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
-			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
-				goto out_freeoutqbufs;
+			card->qdio.out_qs[i]->bufs[j].buffer =
+				&card->qdio.out_qs[i]->qdio_bufs[j];
+			skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
+					    skb_list);
+			lockdep_set_class(
+				&card->qdio.out_qs[i]->bufs[j].skb_list.lock,
+				&qdio_out_skb_queue_key);
+			INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list);
 		}
 	}
-
-	/* completion */
-	if (qeth_alloc_cq(card))
-		goto out_freeoutq;
-
 	return 0;
 
-out_freeoutqbufs:
-	while (j > 0) {
-		--j;
-		kmem_cache_free(qeth_qdio_outbuf_cache,
-				card->qdio.out_qs[i]->bufs[j]);
-		card->qdio.out_qs[i]->bufs[j] = NULL;
-	}
 out_freeoutq:
-	while (i > 0) {
+	while (i > 0)
 		kfree(card->qdio.out_qs[--i]);
-		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-	}
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2693,12 +2353,6 @@ static int qeth_init_input_buffer(struct qeth_card *card,
 	struct qeth_buffer_pool_entry *pool_entry;
 	int i;
 
-	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
-		buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
-		if (!buf->rx_skb)
-			return 1;
-	}
-
 	pool_entry = qeth_find_free_buffer_pool_entry(card);
 	if (!pool_entry)
 		return 1;
@@ -2745,21 +2399,13 @@ int qeth_init_qdio_queues(struct qeth_card *card)
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		return rc;
 	}
-
-	/* completion */
-	rc = qeth_cq_init(card);
-	if (rc) {
-		return rc;
-	}
-
 	/* outbound queue */
 	for (i = 0; i < card->qdio.no_out_queues; ++i) {
 		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
 		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
 			qeth_clear_output_buffer(card->qdio.out_qs[i],
-					card->qdio.out_qs[i]->bufs[j],
-					QETH_QDIO_BUF_EMPTY);
+					&card->qdio.out_qs[i]->bufs[j]);
 		}
 		card->qdio.out_qs[i]->card = card;
 		card->qdio.out_qs[i]->next_buf_to_fill = 0;
@@ -2940,33 +2586,16 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
 	QETH_DBF_TEXT(SETUP, 2, "qipasscb");
 
 	cmd = (struct qeth_ipa_cmd *) data;
-
-	switch (cmd->hdr.return_code) {
-	case IPA_RC_NOTSUPP:
-	case IPA_RC_L2_UNSUPPORTED_CMD:
-		QETH_DBF_TEXT(SETUP, 2, "ipaunsup");
-		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
-		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
-		return -0;
-	default:
-		if (cmd->hdr.return_code) {
-			QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
-						"rc=%d\n",
-						dev_name(&card->gdev->dev),
-						cmd->hdr.return_code);
-			return 0;
-		}
-	}
-
 	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
 		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
 		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
-	} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
+	} else {
 		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
 		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
-	} else
-		QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
-					"\n", dev_name(&card->gdev->dev));
+	}
+	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
+	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_supported);
+	QETH_DBF_TEXT_(SETUP, 2, "%x", cmd->hdr.ipa_enabled);
 	return 0;
 }
 
@@ -3016,7 +2645,7 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
 	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
 	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
 	struct ccw_dev_id ccwid;
-	int level;
+	int level, rc;
 
 	tid->chpid = card->info.chpid;
 	ccw_device_get_id(CARD_RDEV(card), &ccwid);
@@ -3024,10 +2653,17 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
 	tid->devno = ccwid.devno;
 	if (!info)
 		return;
-	level = stsi(NULL, 0, 0, 0);
-	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
+
+	rc = stsi(NULL, 0, 0, 0);
+	if (rc == -ENOSYS)
+		level = rc;
+	else
+		level = (((unsigned int) rc) >> 28);
+
+	if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
 		tid->lparnr = info222->lpar_number;
-	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
+
+	if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
 		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
 		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
 	}
@@ -3098,19 +2734,9 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
 }
 EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
 
-void qeth_buffer_reclaim_work(struct work_struct *work)
-{
-	struct qeth_card *card = container_of(work, struct qeth_card,
-		buffer_reclaim_work.work);
-
-	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
-	qeth_queue_input_buffer(card, card->reclaim_index);
-}
-
 void qeth_queue_input_buffer(struct qeth_card *card, int index)
 {
 	struct qeth_qdio_q *queue = card->qdio.in_q;
-	struct list_head *lh;
 	int count;
 	int i;
 	int rc;
@@ -3142,20 +2768,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
 		atomic_add_unless(&card->force_alloc_skb, -1, 0);
 	}
 
-	if (!count) {
-		i = 0;
-		list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
-			i++;
-		if (i == card->qdio.in_buf_pool.buf_count) {
-			QETH_CARD_TEXT(card, 2, "qsarbw");
-			card->reclaim_index = index;
-			schedule_delayed_work(
-				&card->buffer_reclaim_work,
-				QETH_RECLAIM_WORK_TIME);
-		}
-		return;
-	}
-
 	/*
 	 * according to old code it should be avoided to requeue all
 	 * 128 buffers in order to benefit from PCI avoidance.
@@ -3175,6 +2787,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
 				qeth_get_micros() -
 				card->perf_stats.inbound_do_qdio_start_time;
 		if (rc) {
+			dev_warn(&card->gdev->dev,
+				"QDIO reported an error, rc=%i\n", rc);
 			QETH_CARD_TEXT(card, 2, "qinberr");
 		}
 		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
@@ -3248,12 +2862,12 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 			queue->card->perf_stats.sc_p_dp++;
 		queue->do_pack = 0;
 		/* flush packing buffers */
-		buffer = queue->bufs[queue->next_buf_to_fill];
+		buffer = &queue->bufs[queue->next_buf_to_fill];
 		if ((atomic_read(&buffer->state) ==
 						QETH_QDIO_BUF_EMPTY) &&
 		    (buffer->next_element_to_fill > 0)) {
 			atomic_set(&buffer->state,
 				   QETH_QDIO_BUF_PRIMED);
 			flush_count++;
 			queue->next_buf_to_fill =
 				(queue->next_buf_to_fill + 1) %
@@ -3264,7 +2878,6 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
 	return flush_count;
 }
 
-
 /*
  * Called to flush a packing buffer if no more pci flags are on the queue.
  * Checks if there is a packing buffer and prepares it to be flushed.
@@ -3274,7 +2887,7 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
 {
 	struct qeth_qdio_out_buffer *buffer;
 
-	buffer = queue->bufs[queue->next_buf_to_fill];
+	buffer = &queue->bufs[queue->next_buf_to_fill];
 	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
 	    (buffer->next_element_to_fill > 0)) {
 		/* it's a packing buffer */
@@ -3295,14 +2908,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	unsigned int qdio_flags;
 
 	for (i = index; i < index + count; ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-		buf = queue->bufs[bidx];
+		buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
 		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
 				SBAL_EFLAGS_LAST_ENTRY;
 
-		if (queue->bufstates)
-			queue->bufstates[bidx].user = buf;
-
 		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
 			continue;
 
@@ -3351,12 +2960,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
-		if (rc == -ENOBUFS)
+		if (rc == QDIO_ERROR_SIGA_TARGET)
 			return;
 		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
-		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
-		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
-		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
 		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
 
 		/* this must not happen under normal circumstances. if it
@@ -3418,120 +3024,14 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
 
-int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
-{
-	int rc;
-
-	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
-		rc = -1;
-		goto out;
-	} else {
-		if (card->options.cq == cq) {
-			rc = 0;
-			goto out;
-		}
-
-		if (card->state != CARD_STATE_DOWN &&
-		    card->state != CARD_STATE_RECOVER) {
-			rc = -1;
-			goto out;
-		}
-
-		qeth_free_qdio_buffers(card);
-		card->options.cq = cq;
-		rc = 0;
-	}
-out:
-	return rc;
-
-}
-EXPORT_SYMBOL_GPL(qeth_configure_cq);
-
-
-static void qeth_qdio_cq_handler(struct qeth_card *card,
-				 unsigned int qdio_err,
-				 unsigned int queue, int first_element, int count) {
-	struct qeth_qdio_q *cq = card->qdio.c_q;
-	int i;
-	int rc;
-
-	if (!qeth_is_cq(card, queue))
-		goto out;
-
-	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
-	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
-	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
-
-	if (qdio_err) {
-		netif_stop_queue(card->dev);
-		qeth_schedule_recovery(card);
-		goto out;
-	}
-
-	if (card->options.performance_stats) {
-		card->perf_stats.cq_cnt++;
-		card->perf_stats.cq_start_time = qeth_get_micros();
-	}
-
-	for (i = first_element; i < first_element + count; ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
-		int e;
-
-		e = 0;
-		while (buffer->element[e].addr) {
-			unsigned long phys_aob_addr;
-
-			phys_aob_addr = (unsigned long) buffer->element[e].addr;
-			qeth_qdio_handle_aob(card, phys_aob_addr);
-			buffer->element[e].addr = NULL;
-			buffer->element[e].eflags = 0;
-			buffer->element[e].sflags = 0;
-			buffer->element[e].length = 0;
-
-			++e;
-		}
-
-		buffer->element[15].eflags = 0;
-		buffer->element[15].sflags = 0;
-	}
-	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
-		     card->qdio.c_q->next_buf_to_init,
-		     count);
-	if (rc) {
-		dev_warn(&card->gdev->dev,
-			"QDIO reported an error, rc=%i\n", rc);
-		QETH_CARD_TEXT(card, 2, "qcqherr");
-	}
-	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
-					   + count) % QDIO_MAX_BUFFERS_PER_Q;
-
-	netif_wake_queue(card->dev);
-
-	if (card->options.performance_stats) {
-		int delta_t = qeth_get_micros();
-		delta_t -= card->perf_stats.cq_start_time;
-		card->perf_stats.cq_time += delta_t;
-	}
-out:
-	return;
-}
-
 void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
-		unsigned int queue, int first_elem, int count,
+		unsigned int queue, int first_element, int count,
 		unsigned long card_ptr)
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
-	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
-
-	if (qeth_is_cq(card, queue))
-		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
-	else if (qdio_err)
+	if (qdio_err)
 		qeth_schedule_recovery(card);
-
-
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
 
@@ -3545,7 +3045,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 	int i;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
-	if (qdio_error & QDIO_ERROR_FATAL) {
+	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
 		QETH_CARD_TEXT(card, 2, "achkcond");
 		netif_stop_queue(card->dev);
 		qeth_schedule_recovery(card);
@@ -3557,44 +3057,9 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
 			qeth_get_micros();
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
-		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
-		buffer = queue->bufs[bidx];
+		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
 		qeth_handle_send_error(card, buffer, qdio_error);
-
-		if (queue->bufstates &&
-		    (queue->bufstates[bidx].flags &
-		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
-			WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
-
-			if (atomic_cmpxchg(&buffer->state,
-					   QETH_QDIO_BUF_PRIMED,
-					   QETH_QDIO_BUF_PENDING) ==
-				QETH_QDIO_BUF_PRIMED) {
-				qeth_notify_skbs(queue, buffer,
-						 TX_NOTIFY_PENDING);
-			}
-			buffer->aob = queue->bufstates[bidx].aob;
-			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
-			QETH_CARD_TEXT(queue->card, 5, "aob");
-			QETH_CARD_TEXT_(queue->card, 5, "%lx",
-					virt_to_phys(buffer->aob));
-			if (qeth_init_qdio_out_buf(queue, bidx)) {
-				QETH_CARD_TEXT(card, 2, "outofbuf");
-				qeth_schedule_recovery(card);
-			}
-		} else {
-			if (card->options.cq == QETH_CQ_ENABLED) {
-				enum iucv_tx_notify n;
-
-				n = qeth_compute_cq_notification(
-					buffer->buffer->element[15].sflags, 0);
-				qeth_notify_skbs(queue, buffer, n);
-			}
-
-			qeth_clear_output_buffer(queue, buffer,
-						 QETH_QDIO_BUF_EMPTY);
-		}
-		qeth_cleanup_handled_pending(queue, bidx, 0);
+		qeth_clear_output_buffer(queue, buffer);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */
@@ -3739,8 +3204,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 
 	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
 		frag = &skb_shinfo(skb)->frags[cnt];
-		buffer->element[element].addr = (char *)
-			page_to_phys(skb_frag_page(frag))
+		buffer->element[element].addr = (char *)page_to_phys(frag->page)
 			+ frag->page_offset;
 		buffer->element[element].length = frag->size;
 		buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
@@ -3827,7 +3291,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
 	       QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	/* ... now we've got the queue */
 	index = queue->next_buf_to_fill;
-	buffer = queue->bufs[queue->next_buf_to_fill];
+	buffer = &queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3861,7 +3325,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
 			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
 	start_index = queue->next_buf_to_fill;
-	buffer = queue->bufs[queue->next_buf_to_fill];
+	buffer = &queue->bufs[queue->next_buf_to_fill];
 	/*
 	 * check if buffer is empty to make sure that we do not 'overtake'
 	 * ourselves and try to fill a buffer that is already primed
@@ -3883,7 +3347,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			queue->next_buf_to_fill =
 				(queue->next_buf_to_fill + 1) %
 				QDIO_MAX_BUFFERS_PER_Q;
-			buffer = queue->bufs[queue->next_buf_to_fill];
+			buffer = &queue->bufs[queue->next_buf_to_fill];
 			/* we did a step forward, so check buffer state
 			 * again */
 			if (atomic_read(&buffer->state) !=
@@ -4334,7 +3798,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
 	/* check if there is enough room in userspace */
 	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
 		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
-		cmd->hdr.return_code = IPA_RC_ENOMEM;
+		cmd->hdr.return_code = -ENOMEM;
 		return 0;
 	}
 	QETH_CARD_TEXT_(card, 4, "snore%i",
@@ -4417,104 +3881,6 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 }
 EXPORT_SYMBOL_GPL(qeth_snmp_command);
 
-static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
-		struct qeth_reply *reply, unsigned long data)
-{
-	struct qeth_ipa_cmd *cmd;
-	struct qeth_qoat_priv *priv;
-	char *resdata;
-	int resdatalen;
-
-	QETH_CARD_TEXT(card, 3, "qoatcb");
-
-	cmd = (struct qeth_ipa_cmd *)data;
-	priv = (struct qeth_qoat_priv *)reply->param;
-	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
-	resdata = (char *)data + 28;
-
-	if (resdatalen > (priv->buffer_len - priv->response_len)) {
-		cmd->hdr.return_code = IPA_RC_FFFF;
-		return 0;
-	}
-
-	memcpy((priv->buffer + priv->response_len), resdata,
-		resdatalen);
-	priv->response_len += resdatalen;
-
-	if (cmd->data.setadapterparms.hdr.seq_no <
-	    cmd->data.setadapterparms.hdr.used_total)
-		return 1;
-	return 0;
-}
-
-int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
-{
-	int rc = 0;
-	struct qeth_cmd_buffer *iob;
-	struct qeth_ipa_cmd *cmd;
-	struct qeth_query_oat *oat_req;
-	struct qeth_query_oat_data oat_data;
-	struct qeth_qoat_priv priv;
-	void __user *tmp;
-
-	QETH_CARD_TEXT(card, 3, "qoatcmd");
-
-	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
-		rc = -EOPNOTSUPP;
-		goto out;
-	}
-
-	if (copy_from_user(&oat_data, udata,
-	    sizeof(struct qeth_query_oat_data))) {
-		rc = -EFAULT;
-		goto out;
-	}
-
-	priv.buffer_len = oat_data.buffer_len;
-	priv.response_len = 0;
-	priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
-	if (!priv.buffer) {
-		rc = -ENOMEM;
-		goto out;
-	}
-
-	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
-				   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
-				   sizeof(struct qeth_query_oat));
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	oat_req = &cmd->data.setadapterparms.data.query_oat;
-	oat_req->subcmd_code = oat_data.command;
-
-	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
-			       &priv);
-	if (!rc) {
-		if (is_compat_task())
-			tmp = compat_ptr(oat_data.ptr);
-		else
-			tmp = (void __user *)(unsigned long)oat_data.ptr;
-
-		if (copy_to_user(tmp, priv.buffer,
-		    priv.response_len)) {
-			rc = -EFAULT;
-			goto out_free;
-		}
-
-		oat_data.response_len = priv.response_len;
-
-		if (copy_to_user(udata, &oat_data,
-		    sizeof(struct qeth_query_oat_data)))
-			rc = -EFAULT;
-	} else
-		if (rc == IPA_RC_FFFF)
-			rc = -EFAULT;
-
-out_free:
-	kfree(priv.buffer);
-out:
-	return rc;
-}
-EXPORT_SYMBOL_GPL(qeth_query_oat_command);
-
 static inline int qeth_get_qdio_q_format(struct qeth_card *card)
 {
 	switch (card->info.type) {
@@ -4552,28 +3918,13 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 		goto out_offline;
 	}
 	qeth_configure_unitaddr(card, prcd);
-	if (ddev_offline)
-		qeth_configure_blkt_default(card, prcd);
+	qeth_configure_blkt_default(card, prcd);
 	kfree(prcd);
 
 	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
 	if (rc)
 		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
 
-	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
-	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
-	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
-	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
-	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
-	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
-	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
-		dev_info(&card->gdev->dev,
-			"Completion Queueing supported\n");
-	} else {
-		card->options.cq = QETH_CQ_NOTAVAILABLE;
-	}
-
-
 out_offline:
 	if (ddev_offline == 1)
 		ccw_device_set_offline(ddev);
@@ -4581,30 +3932,11 @@ out:
 	return;
 }
 
-static inline void qeth_qdio_establish_cq(struct qeth_card *card,
-	struct qdio_buffer **in_sbal_ptrs,
-	void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) {
-	int i;
-
-	if (card->options.cq == QETH_CQ_ENABLED) {
-		int offset = QDIO_MAX_BUFFERS_PER_Q *
-			     (card->qdio.no_in_queues - 1);
-		i = QDIO_MAX_BUFFERS_PER_Q * (card->qdio.no_in_queues - 1);
-		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
-			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
-				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
-		}
-
-		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
-	}
-}
-
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
 	char *qib_param_field;
 	struct qdio_buffer **in_sbal_ptrs;
-	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
 	struct qdio_buffer **out_sbal_ptrs;
 	int i, j, k;
 	int rc = 0;
@@ -4613,48 +3945,34 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
 	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
 				  GFP_KERNEL);
-	if (!qib_param_field) {
-		rc = -ENOMEM;
-		goto out_free_nothing;
-	}
+	if (!qib_param_field)
+		return -ENOMEM;
 
 	qeth_create_qib_param_field(card, qib_param_field);
 	qeth_create_qib_param_field_blkt(card, qib_param_field);
 
-	in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
-			       QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
+	in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
 			       GFP_KERNEL);
 	if (!in_sbal_ptrs) {
-		rc = -ENOMEM;
-		goto out_free_qib_param;
+		kfree(qib_param_field);
+		return -ENOMEM;
 	}
-	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
 		in_sbal_ptrs[i] = (struct qdio_buffer *)
 			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
-	}
-
-	queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
-				   GFP_KERNEL);
-	if (!queue_start_poll) {
-		rc = -ENOMEM;
-		goto out_free_in_sbals;
-	}
-	for (i = 0; i < card->qdio.no_in_queues; ++i)
-		queue_start_poll[i] = card->discipline->start_poll;
-
-	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
 
 	out_sbal_ptrs =
-		kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
+		kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
 			sizeof(void *), GFP_KERNEL);
 	if (!out_sbal_ptrs) {
-		rc = -ENOMEM;
-		goto out_free_queue_start_poll;
+		kfree(in_sbal_ptrs);
+		kfree(qib_param_field);
+		return -ENOMEM;
 	}
 	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
 		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
 			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
-				card->qdio.out_qs[i]->bufs[j]->buffer);
+				card->qdio.out_qs[i]->bufs[j].buffer);
 		}
 
 	memset(&init_data, 0, sizeof(struct qdio_initialize));
@@ -4662,15 +3980,14 @@ static int qeth_qdio_establish(struct qeth_card *card)
 	init_data.q_format = qeth_get_qdio_q_format(card);
 	init_data.qib_param_field_format = 0;
 	init_data.qib_param_field = qib_param_field;
-	init_data.no_input_qs = card->qdio.no_in_queues;
+	init_data.no_input_qs = 1;
 	init_data.no_output_qs = card->qdio.no_out_queues;
-	init_data.input_handler = card->discipline->input_handler;
-	init_data.output_handler = card->discipline->output_handler;
-	init_data.queue_start_poll_array = queue_start_poll;
+	init_data.input_handler = card->discipline.input_handler;
+	init_data.output_handler = card->discipline.output_handler;
+	init_data.queue_start_poll = card->discipline.start_poll;
 	init_data.int_parm = (unsigned long) card;
 	init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
 	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
-	init_data.output_sbal_state_array = card->qdio.out_bufstates;
 	init_data.scan_threshold =
 		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
 
@@ -4687,26 +4004,10 @@ static int qeth_qdio_establish(struct qeth_card *card)
 			qdio_free(CARD_DDEV(card));
 		}
 	}
-
-	switch (card->options.cq) {
-	case QETH_CQ_ENABLED:
-		dev_info(&card->gdev->dev, "Completion Queue support enabled");
-		break;
-	case QETH_CQ_DISABLED:
-		dev_info(&card->gdev->dev, "Completion Queue support disabled");
-		break;
-	default:
-		break;
-	}
 out:
 	kfree(out_sbal_ptrs);
-out_free_queue_start_poll:
-	kfree(queue_start_poll);
-out_free_in_sbals:
 	kfree(in_sbal_ptrs);
-out_free_qib_param:
 	kfree(qib_param_field);
-out_free_nothing:
 	return rc;
 }
 
@@ -4725,19 +4026,6 @@ static void qeth_core_free_card(struct qeth_card *card)
 	kfree(card);
 }
 
-void qeth_trace_features(struct qeth_card *card)
-{
-	QETH_CARD_TEXT(card, 2, "features");
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs);
-	QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support);
-}
-EXPORT_SYMBOL_GPL(qeth_trace_features);
-
 static struct ccw_device_id qeth_ids[] = {
 	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
 					.driver_info = QETH_CARD_TYPE_OSD},
@@ -4763,6 +4051,13 @@ static struct ccw_driver qeth_ccw_driver = {
 	.remove = ccwgroup_remove_ccwdev,
 };
 
+static int qeth_core_driver_group(const char *buf, struct device *root_dev,
+				  unsigned long driver_id)
+{
+	return ccwgroup_create_from_string(root_dev, driver_id,
+					   &qeth_ccw_driver, 3, buf);
+}
+
 int qeth_core_hardsetup_card(struct qeth_card *card)
 {
 	int retries = 0;
@@ -4770,7 +4065,7 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
-	qeth_update_from_chp_desc(card);
+	qeth_get_channel_path_desc(card);
 retry:
 	if (retries)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -4849,36 +4144,29 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
 
-static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
-		struct qdio_buffer_element *element,
+static inline int qeth_create_skb_frag(struct qdio_buffer_element *element,
 		struct sk_buff **pskb, int offset, int *pfrag, int data_len)
 {
 	struct page *page = virt_to_page(element->addr);
 	if (*pskb == NULL) {
-		if (qethbuffer->rx_skb) {
-			/* only if qeth_card.options.cq == QETH_CQ_ENABLED */
-			*pskb = qethbuffer->rx_skb;
-			qethbuffer->rx_skb = NULL;
-		} else {
-			*pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
-			if (!(*pskb))
-				return -ENOMEM;
-		}
-
+		/* the upper protocol layers assume that there is data in the
+		 * skb itself. Copy a small amount (64 bytes) to make them
+		 * happy. */
+		*pskb = dev_alloc_skb(64 + ETH_HLEN);
+		if (!(*pskb))
+			return -ENOMEM;
 		skb_reserve(*pskb, ETH_HLEN);
-		if (data_len <= QETH_RX_PULL_LEN) {
+		if (data_len <= 64) {
 			memcpy(skb_put(*pskb, data_len), element->addr + offset,
 				data_len);
 		} else {
 			get_page(page);
-			memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
-			       element->addr + offset, QETH_RX_PULL_LEN);
-			skb_fill_page_desc(*pskb, *pfrag, page,
-					   offset + QETH_RX_PULL_LEN,
-					   data_len - QETH_RX_PULL_LEN);
-			(*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
-			(*pskb)->len += data_len - QETH_RX_PULL_LEN;
-			(*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
+			memcpy(skb_put(*pskb, 64), element->addr + offset, 64);
+			skb_fill_page_desc(*pskb, *pfrag, page, offset + 64,
+					   data_len - 64);
+			(*pskb)->data_len += data_len - 64;
+			(*pskb)->len += data_len - 64;
+			(*pskb)->truesize += data_len - 64;
 			(*pfrag)++;
 		}
 	} else {
@@ -4889,18 +4177,15 @@ static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
 		(*pskb)->truesize += data_len;
 		(*pfrag)++;
 	}
-
-
 	return 0;
 }
 
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
-		struct qeth_qdio_buffer *qethbuffer,
+		struct qdio_buffer *buffer,
 		struct qdio_buffer_element **__element, int *__offset,
 		struct qeth_hdr **hdr)
 {
 	struct qdio_buffer_element *element = *__element;
-	struct qdio_buffer *buffer = qethbuffer->buffer;
 	int offset = *__offset;
 	struct sk_buff *skb = NULL;
 	int skb_len = 0;
@@ -4928,7 +4213,11 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 		break;
 	case QETH_HEADER_TYPE_LAYER3:
 		skb_len = (*hdr)->hdr.l3.length;
-		headroom = ETH_HLEN;
+		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
+		    (card->info.link_type == QETH_LINK_TYPE_HSTR))
+			headroom = TR_HLEN;
+		else
+			headroom = ETH_HLEN;
 		break;
 	case QETH_HEADER_TYPE_OSN:
 		skb_len = (*hdr)->hdr.osn.pdu_length;
@@ -4941,10 +4230,9 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 	if (!skb_len)
 		return NULL;
 
-	if (((skb_len >= card->options.rx_sg_cb) &&
+	if ((skb_len >= card->options.rx_sg_cb) &&
 	    (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
-	    (!atomic_read(&card->force_alloc_skb))) ||
-	    (card->options.cq == QETH_CQ_ENABLED)) {
+	    (!atomic_read(&card->force_alloc_skb))) {
 		use_rx_sg = 1;
 	} else {
 		skb = dev_alloc_skb(skb_len + headroom);
@@ -4959,8 +4247,8 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
 		data_len = min(skb_len, (int)(element->length - offset));
 		if (data_len) {
 			if (use_rx_sg) {
-				if (qeth_create_skb_frag(qethbuffer, element,
-					&skb, offset, &frag, data_len))
+				if (qeth_create_skb_frag(element, &skb, offset,
+					&frag, data_len))
 					goto no_mem;
 			} else {
 				memcpy(skb_put(skb, data_len), data_ptr,
@@ -5056,44 +4344,35 @@ int qeth_core_load_discipline(struct qeth_card *card,
 		enum qeth_discipline_id discipline)
 {
 	int rc = 0;
-	mutex_lock(&qeth_mod_mutex);
 	switch (discipline) {
 	case QETH_DISCIPLINE_LAYER3:
-		card->discipline = try_then_request_module(
-			symbol_get(qeth_l3_discipline), "qeth_l3");
+		card->discipline.ccwgdriver = try_then_request_module(
+				symbol_get(qeth_l3_ccwgroup_driver),
+				"qeth_l3");
 		break;
 	case QETH_DISCIPLINE_LAYER2:
-		card->discipline = try_then_request_module(
-			symbol_get(qeth_l2_discipline), "qeth_l2");
+		card->discipline.ccwgdriver = try_then_request_module(
+				symbol_get(qeth_l2_ccwgroup_driver),
+				"qeth_l2");
 		break;
 	}
-	if (!card->discipline) {
+	if (!card->discipline.ccwgdriver) {
 		dev_err(&card->gdev->dev, "There is no kernel module to "
 			"support discipline %d\n", discipline);
 		rc = -EINVAL;
 	}
-	mutex_unlock(&qeth_mod_mutex);
 	return rc;
 }
 
 void qeth_core_free_discipline(struct qeth_card *card)
 {
 	if (card->options.layer2)
-		symbol_put(qeth_l2_discipline);
+		symbol_put(qeth_l2_ccwgroup_driver);
 	else
-		symbol_put(qeth_l3_discipline);
-	card->discipline = NULL;
+		symbol_put(qeth_l3_ccwgroup_driver);
+	card->discipline.ccwgdriver = NULL;
 }
 
-static const struct device_type qeth_generic_devtype = {
-	.name = "qeth_generic",
-	.groups = qeth_generic_attr_groups,
-};
-static const struct device_type qeth_osn_devtype = {
-	.name = "qeth_osn",
-	.groups = qeth_osn_attr_groups,
-};
-
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
@@ -5148,17 +4427,18 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 	}
 
 	if (card->info.type == QETH_CARD_TYPE_OSN)
-		gdev->dev.type = &qeth_osn_devtype;
+		rc = qeth_core_create_osn_attributes(dev);
 	else
-		gdev->dev.type = &qeth_generic_devtype;
-
+		rc = qeth_core_create_device_attributes(dev);
+	if (rc)
+		goto err_dbf;
 	switch (card->info.type) {
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_OSM:
 		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
 		if (rc)
-			goto err_dbf;
-		rc = card->discipline->setup(card->gdev);
+			goto err_attr;
+		rc = card->discipline.ccwgdriver->probe(card->gdev);
 		if (rc)
 			goto err_disc;
 	case QETH_CARD_TYPE_OSD:
@@ -5176,6 +4456,11 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 
 err_disc:
 	qeth_core_free_discipline(card);
+err_attr:
+	if (card->info.type == QETH_CARD_TYPE_OSN)
+		qeth_core_remove_osn_attributes(dev);
+	else
+		qeth_core_remove_device_attributes(dev);
 err_dbf:
 	debug_unregister(card->debug);
 err_card:
@@ -5192,8 +4477,14 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 
 	QETH_DBF_TEXT(SETUP, 2, "removedv");
 
-	if (card->discipline) {
-		card->discipline->remove(gdev);
+	if (card->info.type == QETH_CARD_TYPE_OSN) {
+		qeth_core_remove_osn_attributes(&gdev->dev);
+	} else {
+		qeth_core_remove_device_attributes(&gdev->dev);
+	}
+
+	if (card->discipline.ccwgdriver) {
+		card->discipline.ccwgdriver->remove(gdev);
 		qeth_core_free_discipline(card);
 	}
 
@@ -5213,7 +4504,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 	int rc = 0;
 	int def_discipline;
 
-	if (!card->discipline) {
+	if (!card->discipline.ccwgdriver) {
 		if (card->info.type == QETH_CARD_TYPE_IQD)
 			def_discipline = QETH_DISCIPLINE_LAYER3;
 		else
@@ -5221,11 +4512,11 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
 		rc = qeth_core_load_discipline(card, def_discipline);
 		if (rc)
 			goto err;
-		rc = card->discipline->setup(card->gdev);
+		rc = card->discipline.ccwgdriver->probe(card->gdev);
 		if (rc)
 			goto err;
 	}
-	rc = card->discipline->set_online(gdev);
+	rc = card->discipline.ccwgdriver->set_online(gdev);
 err:
 	return rc;
 }
@@ -5233,52 +4524,58 @@ err:
 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	return card->discipline->set_offline(gdev);
+	return card->discipline.ccwgdriver->set_offline(gdev);
 }
 
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->shutdown)
-		card->discipline->shutdown(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->shutdown)
+		card->discipline.ccwgdriver->shutdown(gdev);
 }
 
 static int qeth_core_prepare(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->prepare)
-		return card->discipline->prepare(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->prepare)
+		return card->discipline.ccwgdriver->prepare(gdev);
 	return 0;
 }
 
 static void qeth_core_complete(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->complete)
-		card->discipline->complete(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->complete)
+		card->discipline.ccwgdriver->complete(gdev);
 }
 
 static int qeth_core_freeze(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->freeze)
-		return card->discipline->freeze(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->freeze)
+		return card->discipline.ccwgdriver->freeze(gdev);
 	return 0;
 }
 
 static int qeth_core_thaw(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->thaw)
-		return card->discipline->thaw(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->thaw)
+		return card->discipline.ccwgdriver->thaw(gdev);
 	return 0;
 }
 
 static int qeth_core_restore(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	if (card->discipline && card->discipline->restore)
-		return card->discipline->restore(gdev);
+	if (card->discipline.ccwgdriver &&
+	    card->discipline.ccwgdriver->restore)
+		return card->discipline.ccwgdriver->restore(gdev);
 	return 0;
 }
 
@@ -5287,7 +4584,8 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
 		.owner = THIS_MODULE,
 		.name = "qeth",
 	},
-	.setup = qeth_core_probe_device,
+	.driver_id = 0xD8C5E3C8,
+	.probe = qeth_core_probe_device,
 	.remove = qeth_core_remove_device,
 	.set_online = qeth_core_set_online,
 	.set_offline = qeth_core_set_offline,
@@ -5299,29 +4597,20 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
 	.restore = qeth_core_restore,
 };
 
-static ssize_t qeth_core_driver_group_store(struct device_driver *ddrv,
-					    const char *buf, size_t count)
+static ssize_t
+qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
+			     size_t count)
 {
 	int err;
-
-	err = ccwgroup_create_dev(qeth_core_root_dev,
-				  &qeth_core_ccwgroup_driver, 3, buf);
-
-	return err ? err : count;
+	err = qeth_core_driver_group(buf, qeth_core_root_dev,
+				     qeth_core_ccwgroup_driver.driver_id);
+	if (err)
+		return err;
+	else
+		return count;
 }
-static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
 
-static struct attribute *qeth_drv_attrs[] = {
-	&driver_attr_group.attr,
-	NULL,
-};
-static struct attribute_group qeth_drv_attr_group = {
-	.attrs = qeth_drv_attrs,
-};
-static const struct attribute_group *qeth_drv_attr_groups[] = {
-	&qeth_drv_attr_group,
-	NULL,
-};
+static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
 
 static struct {
 	const char str[ETH_GSTRING_LEN];
@@ -5361,8 +4650,6 @@ static struct {
5361 {"tx do_QDIO count"}, 4650 {"tx do_QDIO count"},
5362 {"tx csum"}, 4651 {"tx csum"},
5363 {"tx lin"}, 4652 {"tx lin"},
5364 {"cq handler count"},
5365 {"cq handler time"}
5366}; 4653};
5367 4654
5368int qeth_core_get_sset_count(struct net_device *dev, int stringset) 4655int qeth_core_get_sset_count(struct net_device *dev, int stringset)
@@ -5421,8 +4708,6 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
 	data[32] = card->perf_stats.outbound_do_qdio_cnt;
 	data[33] = card->perf_stats.tx_csum;
 	data[34] = card->perf_stats.tx_lin;
-	data[35] = card->perf_stats.cq_cnt;
-	data[36] = card->perf_stats.cq_time;
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
 
@@ -5555,46 +4840,44 @@ static int __init qeth_core_init(void)
5555 pr_info("loading core functions\n"); 4840 pr_info("loading core functions\n");
5556 INIT_LIST_HEAD(&qeth_core_card_list.list); 4841 INIT_LIST_HEAD(&qeth_core_card_list.list);
5557 rwlock_init(&qeth_core_card_list.rwlock); 4842 rwlock_init(&qeth_core_card_list.rwlock);
5558 mutex_init(&qeth_mod_mutex);
5559 4843
5560 rc = qeth_register_dbf_views(); 4844 rc = qeth_register_dbf_views();
5561 if (rc) 4845 if (rc)
5562 goto out_err; 4846 goto out_err;
4847 rc = ccw_driver_register(&qeth_ccw_driver);
4848 if (rc)
4849 goto ccw_err;
4850 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
4851 if (rc)
4852 goto ccwgroup_err;
4853 rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
4854 &driver_attr_group);
4855 if (rc)
4856 goto driver_err;
5563 qeth_core_root_dev = root_device_register("qeth"); 4857 qeth_core_root_dev = root_device_register("qeth");
5564 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0; 4858 rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
5565 if (rc) 4859 if (rc)
5566 goto register_err; 4860 goto register_err;
4861
5567 qeth_core_header_cache = kmem_cache_create("qeth_hdr", 4862 qeth_core_header_cache = kmem_cache_create("qeth_hdr",
5568 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); 4863 sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
5569 if (!qeth_core_header_cache) { 4864 if (!qeth_core_header_cache) {
5570 rc = -ENOMEM; 4865 rc = -ENOMEM;
5571 goto slab_err; 4866 goto slab_err;
5572 } 4867 }
5573 qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
5574 sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
5575 if (!qeth_qdio_outbuf_cache) {
5576 rc = -ENOMEM;
5577 goto cqslab_err;
5578 }
5579 rc = ccw_driver_register(&qeth_ccw_driver);
5580 if (rc)
5581 goto ccw_err;
5582 qeth_core_ccwgroup_driver.driver.groups = qeth_drv_attr_groups;
5583 rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
5584 if (rc)
5585 goto ccwgroup_err;
5586 4868
5587 return 0; 4869 return 0;
5588
5589ccwgroup_err:
5590 ccw_driver_unregister(&qeth_ccw_driver);
5591ccw_err:
5592 kmem_cache_destroy(qeth_qdio_outbuf_cache);
5593cqslab_err:
5594 kmem_cache_destroy(qeth_core_header_cache);
5595slab_err: 4870slab_err:
5596 root_device_unregister(qeth_core_root_dev); 4871 root_device_unregister(qeth_core_root_dev);
5597register_err: 4872register_err:
4873 driver_remove_file(&qeth_core_ccwgroup_driver.driver,
4874 &driver_attr_group);
4875driver_err:
4876 ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
4877ccwgroup_err:
4878 ccw_driver_unregister(&qeth_ccw_driver);
4879ccw_err:
4880 QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
5598 qeth_unregister_dbf_views(); 4881 qeth_unregister_dbf_views();
5599out_err: 4882out_err:
5600 pr_err("Initializing the qeth device driver failed\n"); 4883 pr_err("Initializing the qeth device driver failed\n");
@@ -5603,11 +4886,12 @@ out_err:
 
 static void __exit qeth_core_exit(void)
 {
+	root_device_unregister(qeth_core_root_dev);
+	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
+			   &driver_attr_group);
 	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
 	ccw_driver_unregister(&qeth_ccw_driver);
-	kmem_cache_destroy(qeth_qdio_outbuf_cache);
 	kmem_cache_destroy(qeth_core_header_cache);
-	root_device_unregister(qeth_core_root_dev);
 	qeth_unregister_dbf_views();
 	pr_info("core functions removed\n");
 }
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5cebfddb86b..ec24901c802 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_core_mpc.c
+ *
  * Copyright IBM Corp. 2007
  * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
  *	      Thomas Spatzier <tspat@de.ibm.com>,
@@ -205,7 +207,6 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
 	{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
 	{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
 	{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
-	{IPA_RC_ENOMEM, "Memory problem"},
 	{IPA_RC_FFFF, "Unknown Error"}
 };
 
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3690bbf2cb3..e5a9d1c0383 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_core_mpc.h
+ *
  * Copyright IBM Corp. 2007
  * Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
  *	      Thomas Spatzier <tspat@de.ibm.com>,
@@ -68,6 +70,16 @@ enum qeth_link_types {
 	QETH_LINK_TYPE_ATM_NATIVE = 0x90,
 };
 
+enum qeth_tr_macaddr_modes {
+	QETH_TR_MACADDR_NONCANONICAL = 0,
+	QETH_TR_MACADDR_CANONICAL = 1,
+};
+
+enum qeth_tr_broadcast_modes {
+	QETH_TR_BROADCAST_ALLRINGS = 0,
+	QETH_TR_BROADCAST_LOCAL = 1,
+};
+
 /*
  * Routing stuff
  */
@@ -178,7 +190,6 @@ enum qeth_ipa_return_codes {
 	IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
 	IPA_RC_LAN_OFFLINE = 0xe080,
 	IPA_RC_INVALID_IP_VERSION2 = 0xf001,
-	IPA_RC_ENOMEM = 0xfffe,
 	IPA_RC_FFFF = 0xffff
 };
 /* for DELIP */
@@ -238,7 +249,6 @@ enum qeth_ipa_setadp_cmd {
 	IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
 	IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
 	IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
-	IPA_SETADP_QUERY_OAT = 0x00080000L,
 };
 enum qeth_ipa_mac_ops {
 	CHANGE_ADDR_READ_MAC = 0,
@@ -388,17 +398,6 @@ struct qeth_set_access_ctrl {
 	__u32 subcmd_code;
 } __attribute__((packed));
 
-struct qeth_query_oat {
-	__u32 subcmd_code;
-	__u8 reserved[12];
-} __packed;
-
-struct qeth_qoat_priv {
-	__u32 buffer_len;
-	__u32 response_len;
-	char *buffer;
-};
-
 struct qeth_ipacmd_setadpparms_hdr {
 	__u32 supp_hw_cmds;
 	__u32 reserved1;
@@ -418,7 +417,6 @@ struct qeth_ipacmd_setadpparms {
 		struct qeth_change_addr change_addr;
 		struct qeth_snmp_cmd snmp;
 		struct qeth_set_access_ctrl set_access_ctrl;
-		struct qeth_query_oat query_oat;
 		__u32 mode;
 	} data;
 } __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9655dc0ea0e..0a8e86c1b0e 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_core_sys.c
+ *
  * Copyright IBM Corp. 2007
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *	      Frank Pavlic <fpavlic@de.ibm.com>,
@@ -432,8 +434,8 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 		goto out;
 	else {
 		card->info.mac_bits = 0;
-		if (card->discipline) {
-			card->discipline->remove(card->gdev);
+		if (card->discipline.ccwgdriver) {
+			card->discipline.ccwgdriver->remove(card->gdev);
 			qeth_core_free_discipline(card);
 		}
 	}
@@ -442,7 +444,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
 	if (rc)
 		goto out;
 
-	rc = card->discipline->setup(card->gdev);
+	rc = card->discipline.ccwgdriver->probe(card->gdev);
 out:
 	mutex_unlock(&card->discipline_mutex);
 	return rc ? rc : count;
@@ -691,6 +693,7 @@ static struct attribute *qeth_blkt_device_attrs[] = {
 	&dev_attr_inter_jumbo.attr,
 	NULL,
 };
+
 static struct attribute_group qeth_device_blkt_group = {
 	.name = "blkt",
 	.attrs = qeth_blkt_device_attrs,
@@ -713,16 +716,11 @@ static struct attribute *qeth_device_attrs[] = {
 	&dev_attr_hw_trap.attr,
 	NULL,
 };
+
 static struct attribute_group qeth_device_attr_group = {
 	.attrs = qeth_device_attrs,
 };
 
-const struct attribute_group *qeth_generic_attr_groups[] = {
-	&qeth_device_attr_group,
-	&qeth_device_blkt_group,
-	NULL,
-};
-
 static struct attribute *qeth_osn_device_attrs[] = {
 	&dev_attr_state.attr,
 	&dev_attr_chpid.attr,
@@ -732,10 +730,37 @@ static struct attribute *qeth_osn_device_attrs[] = {
 	&dev_attr_recover.attr,
 	NULL,
 };
+
 static struct attribute_group qeth_osn_device_attr_group = {
 	.attrs = qeth_osn_device_attrs,
 };
-const struct attribute_group *qeth_osn_attr_groups[] = {
-	&qeth_osn_device_attr_group,
-	NULL,
-};
+
+int qeth_core_create_device_attributes(struct device *dev)
+{
+	int ret;
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group);
+	if (ret)
+		return ret;
+	ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group);
+	if (ret)
+		sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+
+	return 0;
+}
+
+void qeth_core_remove_device_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_device_attr_group);
+	sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group);
+}
+
+int qeth_core_create_osn_attributes(struct device *dev)
+{
+	return sysfs_create_group(&dev->kobj, &qeth_osn_device_attr_group);
+}
+
+void qeth_core_remove_osn_attributes(struct device *dev)
+{
+	sysfs_remove_group(&dev->kobj, &qeth_osn_device_attr_group);
+	return;
+}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 73195553f84..b70b47fbd6c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_l2_main.c
+ *
  * Copyright IBM Corp. 2007, 2009
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *	      Frank Pavlic <fpavlic@de.ibm.com>,
@@ -73,9 +75,6 @@ static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		mii_data->val_out = qeth_mdio_read(dev,
 			mii_data->phy_id, mii_data->reg_num);
 		break;
-	case SIOC_QETH_QUERY_OAT:
-		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
-		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
@@ -302,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
 	spin_unlock_bh(&card->vlanlock);
 }
 
-static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_vlan_vid *id;
 
 	QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
 	if (!vid)
-		return 0;
+		return;
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
 		QETH_CARD_TEXT(card, 3, "aidOSM");
-		return 0;
+		return;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "aidREC");
-		return 0;
+		return;
 	}
 	id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
 	if (id) {
@@ -325,13 +324,10 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 		spin_lock_bh(&card->vlanlock);
 		list_add_tail(&id->list, &card->vid_list);
 		spin_unlock_bh(&card->vlanlock);
-	} else {
-		return -ENOMEM;
 	}
-	return 0;
 }
 
-static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_vlan_vid *id, *tmpid = NULL;
 	struct qeth_card *card = dev->ml_priv;
@@ -339,11 +335,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (card->info.type == QETH_CARD_TYPE_OSM) {
 		QETH_CARD_TEXT(card, 3, "kidOSM");
-		return 0;
+		return;
 	}
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "kidREC");
-		return 0;
+		return;
 	}
 	spin_lock_bh(&card->vlanlock);
 	list_for_each_entry(id, &card->vid_list, list) {
@@ -359,7 +355,6 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 		kfree(tmpid);
 	}
 	qeth_l2_set_multicast_list(card->dev);
-	return 0;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -411,10 +406,10 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
 	unsigned int len;
 
 	*done = 0;
-	WARN_ON_ONCE(!budget);
+	BUG_ON(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
-			&card->qdio.in_q->bufs[card->rx.b_index],
+			card->qdio.in_q->bufs[card->rx.b_index].buffer,
 			&card->rx.b_element, &card->rx.e_offset, &hdr);
 		if (!skb) {
 			*done = 1;
@@ -574,6 +569,7 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
 		default:
 			break;
 		}
+		cmd->hdr.return_code = -EIO;
 	} else {
 		card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
 		memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
@@ -602,6 +598,7 @@ static int qeth_l2_send_delmac_cb(struct qeth_card *card,
 	cmd = (struct qeth_ipa_cmd *) data;
 	if (cmd->hdr.return_code) {
 		QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
+		cmd->hdr.return_code = -EIO;
 		return 0;
 	}
 	card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
@@ -626,13 +623,10 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
 	QETH_DBF_TEXT(SETUP, 2, "doL2init");
 	QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card));
 
-	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
-		rc = qeth_query_setadapterparms(card);
-		if (rc) {
-			QETH_DBF_MESSAGE(2, "could not query adapter "
-				"parameters on device %s: x%x\n",
-				CARD_BUS_ID(card), rc);
-		}
+	rc = qeth_query_setadapterparms(card);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "could not query adapter parameters on "
+			"device %s: x%x\n", CARD_BUS_ID(card), rc);
 	}
 
 	if (card->info.type == QETH_CARD_TYPE_IQD ||
@@ -648,7 +642,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
 		}
 		QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
 	} else {
-		eth_random_addr(card->dev->dev_addr);
+		random_ether_addr(card->dev->dev_addr);
 		memcpy(card->dev->dev_addr, vendor_pre, 3);
 	}
 	return 0;
@@ -679,9 +673,9 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 		return -ERESTARTSYS;
 	}
 	rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-	if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+	if (!rc)
 		rc = qeth_l2_send_setmac(card, addr->sa_data);
-	return rc ? -EINVAL : 0;
+	return rc;
 }
 
 static void qeth_l2_set_multicast_list(struct net_device *dev)
@@ -883,6 +877,12 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
 	INIT_LIST_HEAD(&card->mc_list);
 	card->options.layer2 = 1;
 	card->info.hwtrap = 0;
+	card->discipline.start_poll = qeth_qdio_start_poll;
+	card->discipline.input_handler = (qdio_handler_t *)
+		qeth_qdio_input_handler;
+	card->discipline.output_handler = (qdio_handler_t *)
+		qeth_qdio_output_handler;
+	card->discipline.recover = qeth_l2_recover;
 	return 0;
 }
 
@@ -925,7 +925,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
 	.ndo_get_stats = qeth_get_stats,
 	.ndo_start_xmit = qeth_l2_hard_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_rx_mode = qeth_l2_set_multicast_list,
+	.ndo_set_multicast_list = qeth_l2_set_multicast_list,
 	.ndo_do_ioctl = qeth_l2_do_ioctl,
 	.ndo_set_mac_address = qeth_l2_set_mac_address,
 	.ndo_change_mtu = qeth_change_mtu,
@@ -973,6 +973,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	int rc = 0;
 	enum qeth_card_states recover_flag;
 
+	BUG_ON(!card);
 	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -985,7 +986,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 		rc = -ENODEV;
 		goto out_remove;
 	}
-	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l2_setup_netdev(card)) {
 		rc = -ENODEV;
@@ -1144,12 +1144,11 @@ static int qeth_l2_recover(void *ptr)
 		dev_info(&card->gdev->dev,
 			"Device successfully recovered!\n");
 	else {
-		if (rtnl_trylock()) {
+		rtnl_lock();
 		dev_close(card->dev);
 		rtnl_unlock();
 		dev_warn(&card->gdev->dev, "The qeth device driver "
 			"failed to recover an error on the device\n");
-		}
 	}
 	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
 	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
@@ -1170,7 +1169,6 @@ static void __exit qeth_l2_exit(void)
 static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	qeth_set_allowed_threads(card, 0, 1);
 	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
 		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
 	qeth_qdio_clear_card(card, 0);
@@ -1223,12 +1221,8 @@ out:
 	return rc;
 }
 
-struct qeth_discipline qeth_l2_discipline = {
-	.start_poll = qeth_qdio_start_poll,
-	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
-	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
-	.recover = qeth_l2_recover,
-	.setup = qeth_l2_probe_device,
+struct ccwgroup_driver qeth_l2_ccwgroup_driver = {
+	.probe = qeth_l2_probe_device,
 	.remove = qeth_l2_remove_device,
 	.set_online = qeth_l2_set_online,
 	.set_offline = qeth_l2_set_offline,
@@ -1237,7 +1231,7 @@ struct qeth_discipline qeth_l2_discipline = {
 	.thaw = qeth_l2_pm_resume,
 	.restore = qeth_l2_pm_resume,
 };
-EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver);
 
 static int qeth_osn_send_control_data(struct qeth_card *card, int len,
 		struct qeth_cmd_buffer *iob)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 29c1c00e3a0..14a43aeb0c2 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_l3.h
+ *
  * Copyright IBM Corp. 2007
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *	      Frank Pavlic <fpavlic@de.ibm.com>,
@@ -61,9 +63,5 @@ int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
 void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
 			const u8 *);
 int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
-struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
-int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
-int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
-void qeth_l3_set_ip_addr_list(struct qeth_card *);
 
 #endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6e5eef01e66..c74e8670fab 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,4 +1,6 @@
 /*
+ * drivers/s390/net/qeth_l3_main.c
+ *
  * Copyright IBM Corp. 2007, 2009
  * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
  *	      Frank Pavlic <fpavlic@de.ibm.com>,
@@ -26,10 +28,7 @@
 
 #include <net/ip.h>
 #include <net/arp.h>
-#include <net/route.h>
-#include <net/ip6_fib.h>
 #include <net/ip6_checksum.h>
-#include <net/iucv/af_iucv.h>
 
 #include "qeth_l3.h"
 
@@ -268,7 +267,7 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
 	}
 }
 
-int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+static int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 {
 	unsigned long flags;
 	int rc = 0;
@@ -287,7 +286,7 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 	return rc;
 }
 
-int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
+static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 {
 	unsigned long flags;
 	int rc = 0;
@@ -306,7 +305,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
 }
 
 
-struct qeth_ipaddr *qeth_l3_get_addr_buffer(
+static struct qeth_ipaddr *qeth_l3_get_addr_buffer(
 		enum qeth_prot_versions prot)
 {
 	struct qeth_ipaddr *addr;
@@ -422,7 +421,7 @@ again:
 	list_splice(&fail_list, &card->ip_list);
 }
 
-void qeth_l3_set_ip_addr_list(struct qeth_card *card)
+static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 {
 	struct list_head *tbd_list;
 	struct qeth_ipaddr *todo, *addr;
@@ -439,7 +438,7 @@ void qeth_l3_set_ip_addr_list(struct qeth_card *card)
 
 	spin_lock_irqsave(&card->ip_lock, flags);
 	tbd_list = card->ip_tbd_list;
-	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
 	if (!card->ip_tbd_list) {
 		QETH_CARD_TEXT(card, 0, "silnomem");
 		card->ip_tbd_list = tbd_list;
@@ -794,7 +793,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		rc = -EEXIST;
 	spin_unlock_irqrestore(&card->ip_lock, flags);
 	if (rc) {
-		kfree(ipaddr);
 		return rc;
 	}
 	if (!qeth_l3_add_ip(card, ipaddr))
@@ -859,7 +857,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		rc = -EEXIST;
 	spin_unlock_irqrestore(&card->ip_lock, flags);
 	if (rc) {
-		kfree(ipaddr);
 		return rc;
 	}
 	if (!qeth_l3_add_ip(card, ipaddr))
@@ -976,6 +973,57 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type)
 	return ct | QETH_CAST_UNICAST;
 }
 
+static int qeth_l3_send_setadp_mode(struct qeth_card *card, __u32 command,
+					__u32 mode)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_CARD_TEXT(card, 4, "adpmode");
+
+	iob = qeth_get_adapter_cmd(card, command,
+				sizeof(struct qeth_ipacmd_setadpparms));
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setadapterparms.data.mode = mode;
+	rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb,
+			       NULL);
+	return rc;
+}
+
+static int qeth_l3_setadapter_hstr(struct qeth_card *card)
+{
+	int rc;
+
+	QETH_CARD_TEXT(card, 4, "adphstr");
+
+	if (qeth_adp_supported(card, IPA_SETADP_SET_BROADCAST_MODE)) {
+		rc = qeth_l3_send_setadp_mode(card,
+					IPA_SETADP_SET_BROADCAST_MODE,
+					card->options.broadcast_mode);
+		if (rc)
+			QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on "
+					"device %s: x%x\n",
+					CARD_BUS_ID(card), rc);
+		rc = qeth_l3_send_setadp_mode(card,
+					IPA_SETADP_ALTER_MAC_ADDRESS,
+					card->options.macaddr_mode);
+		if (rc)
+			QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on "
+					"device %s: x%x\n", CARD_BUS_ID(card), rc);
+		return rc;
+	}
+	if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL)
+		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
+				"to set broadcast mode, using ALLRINGS "
+				"on device %s:\n", CARD_BUS_ID(card));
+	if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL)
+		QETH_DBF_MESSAGE(2, "set adapter parameters not available "
+				"to set macaddr mode, using NONCANONICAL "
+				"on device %s:\n", CARD_BUS_ID(card));
+	return 0;
+}
+
 static int qeth_l3_setadapter_parms(struct qeth_card *card)
 {
 	int rc;
@@ -1001,6 +1049,10 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card)
1001 " address failed\n"); 1049 " address failed\n");
1002 } 1050 }
1003 1051
1052 if ((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
1053 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))
1054 rc = qeth_l3_setadapter_hstr(card);
1055
1004 return rc; 1056 return rc;
1005} 1057}
1006 1058
@@ -1362,7 +1414,7 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1362 return 0; 1414 return 0;
1363} 1415}
1364 1416
1365static int qeth_l3_set_rx_csum(struct qeth_card *card, int on) 1417int qeth_l3_set_rx_csum(struct qeth_card *card, int on)
1366{ 1418{
1367 int rc = 0; 1419 int rc = 0;
1368 1420
@@ -1473,7 +1525,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
 		memcpy(card->dev->dev_addr,
 			cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
 	else
-		eth_random_addr(card->dev->dev_addr);
+		random_ether_addr(card->dev->dev_addr);
 
 	return 0;
 }
@@ -1616,7 +1668,10 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
 static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
 				struct net_device *dev)
 {
-	ip_eth_mc_map(ipm, mac);
+	if (dev->type == ARPHRD_IEEE802_TR)
+		ip_tr_mc_map(ipm, mac);
+	else
+		ip_eth_mc_map(ipm, mac);
 }
 
 static void qeth_l3_add_mc(struct qeth_card *card, struct in_device *in4_dev)
@@ -1760,8 +1815,6 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
 	QETH_CARD_TEXT(card, 4, "frvaddr4");
 
 	netdev = __vlan_find_dev_deep(card->dev, vid);
-	if (!netdev)
-		return;
 	in_dev = in_dev_get(netdev);
 	if (!in_dev)
 		return;
@@ -1790,8 +1843,6 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
 	QETH_CARD_TEXT(card, 4, "frvaddr6");
 
 	netdev = __vlan_find_dev_deep(card->dev, vid);
-	if (!netdev)
-		return;
 	in6_dev = in6_dev_get(netdev);
 	if (!in6_dev)
 		return;
@@ -1817,15 +1868,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
 		qeth_l3_free_vlan_addresses6(card, vid);
 }
 
-static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 
 	set_bit(vid, card->active_vlans);
-	return 0;
+	return;
 }
 
-static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 {
 	struct qeth_card *card = dev->ml_priv;
 	unsigned long flags;
@@ -1833,7 +1884,7 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
 	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
 		QETH_CARD_TEXT(card, 3, "kidREC");
-		return 0;
+		return;
 	}
 	spin_lock_irqsave(&card->vlanlock, flags);
 	/* unregister IP addresses of vlan device */
@@ -1841,7 +1892,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	clear_bit(vid, card->active_vlans);
 	spin_unlock_irqrestore(&card->vlanlock, flags);
 	qeth_l3_set_multicast_list(card->dev);
-	return 0;
 }
 
 static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -1868,6 +1918,8 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
 #endif
 	case __constant_htons(ETH_P_IP):
 		ip_hdr = (struct iphdr *)skb->data;
+		(card->dev->type == ARPHRD_IEEE802_TR) ?
+		ip_tr_mc_map(ip_hdr->daddr, tg_addr):
 		ip_eth_mc_map(ip_hdr->daddr, tg_addr);
 		break;
 	default:
@@ -1903,7 +1955,12 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
1903 tg_addr, "FAKELL", card->dev->addr_len); 1955 tg_addr, "FAKELL", card->dev->addr_len);
1904 } 1956 }
1905 1957
1906 skb->protocol = eth_type_trans(skb, card->dev); 1958#ifdef CONFIG_TR
1959 if (card->dev->type == ARPHRD_IEEE802_TR)
1960 skb->protocol = tr_type_trans(skb, card->dev);
1961 else
1962#endif
1963 skb->protocol = eth_type_trans(skb, card->dev);
1907 1964
1908 if (hdr->hdr.l3.ext_flags & 1965 if (hdr->hdr.l3.ext_flags &
1909 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { 1966 (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) {
@@ -1936,13 +1993,12 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	__u16 vlan_tag = 0;
 	int is_vlan;
 	unsigned int len;
-	__u16 magic;
 
 	*done = 0;
-	WARN_ON_ONCE(!budget);
+	BUG_ON(!budget);
 	while (budget) {
 		skb = qeth_core_get_next_skb(card,
-			&card->qdio.in_q->bufs[card->rx.b_index],
+			card->qdio.in_q->bufs[card->rx.b_index].buffer,
 			&card->rx.b_element, &card->rx.e_offset, &hdr);
 		if (!skb) {
 			*done = 1;
@@ -1951,26 +2007,12 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 		skb->dev = card->dev;
 		switch (hdr->hdr.l3.id) {
 		case QETH_HEADER_TYPE_LAYER3:
-			magic = *(__u16 *)skb->data;
-			if ((card->info.type == QETH_CARD_TYPE_IQD) &&
-			    (magic == ETH_P_AF_IUCV)) {
-				skb->protocol = ETH_P_AF_IUCV;
-				skb->pkt_type = PACKET_HOST;
-				skb->mac_header = NET_SKB_PAD;
-				skb->dev = card->dev;
-				len = skb->len;
-				card->dev->header_ops->create(skb, card->dev, 0,
-					card->dev->dev_addr, "FAKELL",
-					card->dev->addr_len);
-				netif_receive_skb(skb);
-			} else {
-				is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
-					&vlan_tag);
-				len = skb->len;
-				if (is_vlan && !card->options.sniffer)
-					__vlan_hwaccel_put_tag(skb, vlan_tag);
-				napi_gro_receive(&card->napi, skb);
-			}
+			is_vlan = qeth_l3_rebuild_skb(card, skb, hdr,
+				&vlan_tag);
+			len = skb->len;
+			if (is_vlan && !card->options.sniffer)
+				__vlan_hwaccel_put_tag(skb, vlan_tag);
+			napi_gro_receive(&card->napi, skb);
 			break;
 		case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
 			skb->pkt_type = PACKET_HOST;
@@ -2077,7 +2119,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
 	struct net_device *netdev;
 
 	rcu_read_lock();
-	netdev = __vlan_find_dev_deep(card->dev, vid);
+	netdev = __vlan_find_dev_deep(dev, vid);
 	rcu_read_unlock();
 	if (netdev == dev) {
 		rc = QETH_VLAN_CARD;
@@ -2369,7 +2411,7 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card,
 
 	if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
 		QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
-		cmd->hdr.return_code = IPA_RC_ENOMEM;
+		cmd->hdr.return_code = -ENOMEM;
 		goto out_error;
 	}
 
@@ -2684,9 +2726,6 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 				mii_data->phy_id,
 				mii_data->reg_num);
 		break;
-	case SIOC_QETH_QUERY_OAT:
-		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
-		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
@@ -2704,11 +2743,10 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	rcu_read_lock();
 	dst = skb_dst(skb);
 	if (dst)
-		n = dst_neigh_lookup_skb(dst, skb);
+		n = dst_get_neighbour(dst);
 	if (n) {
 		cast_type = n->type;
 		rcu_read_unlock();
-		neigh_release(n);
 		if ((cast_type == RTN_BROADCAST) ||
 		    (cast_type == RTN_MULTICAST) ||
 		    (cast_type == RTN_ANYCAST))
@@ -2750,33 +2788,10 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	return cast_type;
 }
 
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
-		struct qeth_hdr *hdr, struct sk_buff *skb)
-{
-	char daddr[16];
-	struct af_iucv_trans_hdr *iucv_hdr;
-
-	skb_pull(skb, 14);
-	card->dev->header_ops->create(skb, card->dev, 0,
-		card->dev->dev_addr, card->dev->dev_addr,
-		card->dev->addr_len);
-	skb_pull(skb, 14);
-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
-	memset(hdr, 0, sizeof(struct qeth_hdr));
-	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
-	hdr->hdr.l3.ext_flags = 0;
-	hdr->hdr.l3.length = skb->len;
-	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
-	memset(daddr, 0, sizeof(daddr));
-	daddr[0] = 0xfe;
-	daddr[1] = 0x80;
-	memcpy(&daddr[8], iucv_hdr->destUserID, 8);
-	memcpy(hdr->hdr.l3.dest_addr, daddr, 16);
-}
-
 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 		struct sk_buff *skb, int ipv, int cast_type)
 {
+	struct neighbour *n = NULL;
 	struct dst_entry *dst;
 
 	memset(hdr, 0, sizeof(struct qeth_hdr));
@@ -2799,31 +2814,41 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
 
 	rcu_read_lock();
 	dst = skb_dst(skb);
+	if (dst)
+		n = dst_get_neighbour(dst);
 	if (ipv == 4) {
-		struct rtable *rt = (struct rtable *) dst;
-		__be32 *pkey = &ip_hdr(skb)->daddr;
-
-		if (rt->rt_gateway)
-			pkey = &rt->rt_gateway;
-
 		/* IPv4 */
 		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
 		memset(hdr->hdr.l3.dest_addr, 0, 12);
-		*((__be32 *) (&hdr->hdr.l3.dest_addr[12])) = *pkey;
+		if (n) {
+			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
+				*((u32 *) n->primary_key);
+		} else {
+			/* fill in destination address used in ip header */
+			*((u32 *) (&hdr->hdr.l3.dest_addr[12])) =
+				ip_hdr(skb)->daddr;
+		}
 	} else if (ipv == 6) {
-		struct rt6_info *rt = (struct rt6_info *) dst;
-		struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
-
-		if (!ipv6_addr_any(&rt->rt6i_gateway))
-			pkey = &rt->rt6i_gateway;
-
 		/* IPv6 */
 		hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags6(cast_type);
 		if (card->info.type == QETH_CARD_TYPE_IQD)
 			hdr->hdr.l3.flags &= ~QETH_HDR_PASSTHRU;
-		memcpy(hdr->hdr.l3.dest_addr, pkey, 16);
+		if (n) {
+			memcpy(hdr->hdr.l3.dest_addr,
+				n->primary_key, 16);
+		} else {
+			/* fill in destination address used in ip header */
+			memcpy(hdr->hdr.l3.dest_addr,
+				&ipv6_hdr(skb)->daddr, 16);
+		}
 	} else {
-		if (!memcmp(skb->data + sizeof(struct qeth_hdr),
+		/* passthrough */
+		if ((skb->dev->type == ARPHRD_IEEE802_TR) &&
+			!memcmp(skb->data + sizeof(struct qeth_hdr) +
+			sizeof(__u16), skb->dev->broadcast, 6)) {
+			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
+				QETH_HDR_PASSTHRU;
+		} else if (!memcmp(skb->data + sizeof(struct qeth_hdr),
 				skb->dev->broadcast, 6)) {
 			/* broadcast? */
 			hdr->hdr.l3.flags = QETH_CAST_BROADCAST |
@@ -2918,11 +2943,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int data_offset = -1;
 	int nr_frags;
 
-	if (((card->info.type == QETH_CARD_TYPE_IQD) &&
-	    (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
-	    ((card->options.cq == QETH_CQ_ENABLED) &&
-	    (skb->protocol != ETH_P_AF_IUCV)))) ||
-	    card->options.sniffer)
+	if (((card->info.type == QETH_CARD_TYPE_IQD) && (!ipv)) ||
+	    card->options.sniffer)
 		goto tx_drop;
 
 	if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -2944,10 +2966,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
 	    (skb_shinfo(skb)->nr_frags == 0)) {
 		new_skb = skb;
-		if (new_skb->protocol == ETH_P_AF_IUCV)
-			data_offset = 0;
-		else
-			data_offset = ETH_HLEN;
+		data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2965,7 +2984,10 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_pull(new_skb, ETH_HLEN);
 	} else {
 		if (ipv == 4) {
-			skb_pull(new_skb, ETH_HLEN);
+			if (card->dev->type == ARPHRD_IEEE802_TR)
+				skb_pull(new_skb, TR_HLEN);
+			else
+				skb_pull(new_skb, ETH_HLEN);
 		}
 
 		if (ipv != 4 && vlan_tx_tag_present(new_skb)) {
@@ -2978,6 +3000,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			tag = (u16 *)(new_skb->data + 12);
 			*tag = __constant_htons(ETH_P_8021Q);
 			*(tag + 1) = htons(vlan_tx_tag_get(new_skb));
+			new_skb->vlan_tci = 0;
 		}
 	}
 
@@ -3009,13 +3032,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			qeth_l3_fill_header(card, hdr, new_skb, ipv,
 				cast_type);
 		} else {
-			if (new_skb->protocol == ETH_P_AF_IUCV)
-				qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
-			else {
-				qeth_l3_fill_header(card, hdr, new_skb, ipv,
-					cast_type);
-				hdr->hdr.l3.length = new_skb->len - data_offset;
-			}
+			qeth_l3_fill_header(card, hdr, new_skb, ipv,
+				cast_type);
+			hdr->hdr.l3.length = new_skb->len - data_offset;
 		}
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -3141,8 +3160,7 @@ static int qeth_l3_stop(struct net_device *dev)
 	return 0;
 }
 
-static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
-	netdev_features_t features)
+static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
 {
 	struct qeth_card *card = dev->ml_priv;
 
@@ -3156,8 +3174,7 @@ static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
 	return features;
 }
 
-static int qeth_l3_set_features(struct net_device *dev,
-	netdev_features_t features)
+static int qeth_l3_set_features(struct net_device *dev, u32 features)
 {
 	struct qeth_card *card = dev->ml_priv;
 	u32 changed = dev->features ^ features;
@@ -3216,7 +3233,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_get_stats = qeth_get_stats,
 	.ndo_start_xmit = qeth_l3_hard_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_rx_mode = qeth_l3_set_multicast_list,
+	.ndo_set_multicast_list = qeth_l3_set_multicast_list,
 	.ndo_do_ioctl = qeth_l3_do_ioctl,
 	.ndo_change_mtu = qeth_change_mtu,
 	.ndo_fix_features = qeth_l3_fix_features,
@@ -3232,7 +3249,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_get_stats = qeth_get_stats,
 	.ndo_start_xmit = qeth_l3_hard_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
-	.ndo_set_rx_mode = qeth_l3_set_multicast_list,
+	.ndo_set_multicast_list = qeth_l3_set_multicast_list,
 	.ndo_do_ioctl = qeth_l3_do_ioctl,
 	.ndo_change_mtu = qeth_change_mtu,
 	.ndo_fix_features = qeth_l3_fix_features,
@@ -3249,8 +3266,12 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 	    card->info.type == QETH_CARD_TYPE_OSX) {
 		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
 		    (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
-			pr_info("qeth_l3: ignoring TR device\n");
-			return -ENODEV;
+#ifdef CONFIG_TR
+			card->dev = alloc_trdev(0);
+#endif
+			if (!card->dev)
+				return -ENODEV;
+			card->dev->netdev_ops = &qeth_l3_netdev_ops;
 		} else {
 			card->dev = alloc_etherdev(0);
 			if (!card->dev)
3276 card->dev->flags |= IFF_NOARP; 3297 card->dev->flags |= IFF_NOARP;
3277 card->dev->netdev_ops = &qeth_l3_netdev_ops; 3298 card->dev->netdev_ops = &qeth_l3_netdev_ops;
3278 qeth_l3_iqd_read_initial_mac(card); 3299 qeth_l3_iqd_read_initial_mac(card);
3279 if (card->options.hsuid[0])
3280 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
3281 } else 3300 } else
3282 return -ENODEV; 3301 return -ENODEV;
3283 3302
@@ -3303,6 +3322,12 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
 	qeth_l3_create_device_attributes(&gdev->dev);
 	card->options.layer2 = 0;
 	card->info.hwtrap = 0;
+	card->discipline.start_poll = qeth_qdio_start_poll;
+	card->discipline.input_handler = (qdio_handler_t *)
+		qeth_qdio_input_handler;
+	card->discipline.output_handler = (qdio_handler_t *)
+		qeth_qdio_output_handler;
+	card->discipline.recover = qeth_l3_recover;
 	return 0;
 }
 
@@ -3334,6 +3359,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 	int rc = 0;
 	enum qeth_card_states recover_flag;
 
+	BUG_ON(!card);
 	mutex_lock(&card->discipline_mutex);
 	mutex_lock(&card->conf_mutex);
 	QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -3346,7 +3372,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 		rc = -ENODEV;
 		goto out_remove;
 	}
-	qeth_trace_features(card);
 
 	if (!card->dev && qeth_l3_setup_netdev(card)) {
 		rc = -ENODEV;
@@ -3413,13 +3438,14 @@ contin:
 	else
 		netif_carrier_off(card->dev);
 	if (recover_flag == CARD_STATE_RECOVER) {
-		rtnl_lock();
 		if (recovery_mode)
 			__qeth_l3_open(card->dev);
-		else
+		else {
+			rtnl_lock();
 			dev_open(card->dev);
+			rtnl_unlock();
+		}
 		qeth_l3_set_multicast_list(card->dev);
-		rtnl_unlock();
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3465,11 +3491,6 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
3465 card->info.hwtrap = 1; 3491 card->info.hwtrap = 1;
3466 } 3492 }
3467 qeth_l3_stop_card(card, recovery_mode); 3493 qeth_l3_stop_card(card, recovery_mode);
3468 if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
3469 rtnl_lock();
3470 call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
3471 rtnl_unlock();
3472 }
3473 rc = ccw_device_set_offline(CARD_DDEV(card)); 3494 rc = ccw_device_set_offline(CARD_DDEV(card));
3474 rc2 = ccw_device_set_offline(CARD_WDEV(card)); 3495 rc2 = ccw_device_set_offline(CARD_WDEV(card));
3475 rc3 = ccw_device_set_offline(CARD_RDEV(card)); 3496 rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3510,12 +3531,11 @@ static int qeth_l3_recover(void *ptr)
3510 dev_info(&card->gdev->dev, 3531 dev_info(&card->gdev->dev,
3511 "Device successfully recovered!\n"); 3532 "Device successfully recovered!\n");
3512 else { 3533 else {
3513 if (rtnl_trylock()) { 3534 rtnl_lock();
3514 dev_close(card->dev); 3535 dev_close(card->dev);
3515 rtnl_unlock(); 3536 rtnl_unlock();
3516 dev_warn(&card->gdev->dev, "The qeth device driver " 3537 dev_warn(&card->gdev->dev, "The qeth device driver "
3517 "failed to recover an error on the device\n"); 3538 "failed to recover an error on the device\n");
3518 }
3519 } 3539 }
3520 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); 3540 qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
3521 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); 3541 qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
@@ -3525,7 +3545,6 @@ static int qeth_l3_recover(void *ptr)
3525static void qeth_l3_shutdown(struct ccwgroup_device *gdev) 3545static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
3526{ 3546{
3527 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 3547 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
3528 qeth_set_allowed_threads(card, 0, 1);
3529 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) 3548 if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
3530 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); 3549 qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
3531 qeth_qdio_clear_card(card, 0); 3550 qeth_qdio_clear_card(card, 0);
@@ -3578,12 +3597,8 @@ out:
3578 return rc; 3597 return rc;
3579} 3598}
3580 3599
3581struct qeth_discipline qeth_l3_discipline = { 3600struct ccwgroup_driver qeth_l3_ccwgroup_driver = {
3582 .start_poll = qeth_qdio_start_poll, 3601 .probe = qeth_l3_probe_device,
3583 .input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
3584 .output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
3585 .recover = qeth_l3_recover,
3586 .setup = qeth_l3_probe_device,
3587 .remove = qeth_l3_remove_device, 3602 .remove = qeth_l3_remove_device,
3588 .set_online = qeth_l3_set_online, 3603 .set_online = qeth_l3_set_online,
3589 .set_offline = qeth_l3_set_offline, 3604 .set_offline = qeth_l3_set_offline,
@@ -3592,7 +3607,7 @@ struct qeth_discipline qeth_l3_discipline = {
3592 .thaw = qeth_l3_pm_resume, 3607 .thaw = qeth_l3_pm_resume,
3593 .restore = qeth_l3_pm_resume, 3608 .restore = qeth_l3_pm_resume,
3594}; 3609};
3595EXPORT_SYMBOL_GPL(qeth_l3_discipline); 3610EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver);
3596 3611
3597static int qeth_l3_ip_event(struct notifier_block *this, 3612static int qeth_l3_ip_event(struct notifier_block *this,
3598 unsigned long event, void *ptr) 3613 unsigned long event, void *ptr)
@@ -3606,9 +3621,9 @@ static int qeth_l3_ip_event(struct notifier_block *this,
3606 return NOTIFY_DONE; 3621 return NOTIFY_DONE;
3607 3622
3608 card = qeth_l3_get_card_from_dev(dev); 3623 card = qeth_l3_get_card_from_dev(dev);
3624 QETH_CARD_TEXT(card, 3, "ipevent");
3609 if (!card) 3625 if (!card)
3610 return NOTIFY_DONE; 3626 return NOTIFY_DONE;
3611 QETH_CARD_TEXT(card, 3, "ipevent");
3612 3627
3613 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); 3628 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
3614 if (addr != NULL) { 3629 if (addr != NULL) {
@@ -3652,6 +3667,7 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
3652 struct qeth_ipaddr *addr; 3667 struct qeth_ipaddr *addr;
3653 struct qeth_card *card; 3668 struct qeth_card *card;
3654 3669
3670
3655 card = qeth_l3_get_card_from_dev(dev); 3671 card = qeth_l3_get_card_from_dev(dev);
3656 if (!card) 3672 if (!card)
3657 return NOTIFY_DONE; 3673 return NOTIFY_DONE;
@@ -3714,9 +3730,9 @@ static void qeth_l3_unregister_notifiers(void)
3714{ 3730{
3715 3731
3716 QETH_DBF_TEXT(SETUP, 5, "unregnot"); 3732 QETH_DBF_TEXT(SETUP, 5, "unregnot");
3717 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 3733 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3718#ifdef CONFIG_QETH_IPV6 3734#ifdef CONFIG_QETH_IPV6
3719 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 3735 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3720#endif /* QETH_IPV6 */ 3736#endif /* QETH_IPV6 */
3721} 3737}
3722 3738
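
Note: in the __qeth_l3_set_online() hunk above, the rtnl_lock()/rtnl_unlock() pair is narrowed so that it covers only the dev_open() call: dev_open() must run with the RTNL held, while the driver-internal __qeth_l3_open() recovery path and qeth_l3_set_multicast_list() are left outside the lock. A minimal sketch of the resulting bring-up order, reusing the qeth names from the hunk (everything else here is illustrative, not the driver's actual code):

/* Sketch only: recovery bring-up as ordered in the hunk above. */
static void example_recover_netdev(struct qeth_card *card, int recovery_mode)
{
	if (recovery_mode) {
		__qeth_l3_open(card->dev);	/* internal open path */
	} else {
		rtnl_lock();			/* dev_open() requires the RTNL */
		dev_open(card->dev);
		rtnl_unlock();
	}
	qeth_l3_set_multicast_list(card->dev);	/* now runs unlocked */
}
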
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index ebc37948626..cd99210296e 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * drivers/s390/net/qeth_l3_sys.c
3 *
2 * Copyright IBM Corp. 2007 4 * Copyright IBM Corp. 2007
3 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, 5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
4 * Frank Pavlic <fpavlic@de.ibm.com>, 6 * Frank Pavlic <fpavlic@de.ibm.com>,
@@ -7,7 +9,7 @@
7 */ 9 */
8 10
9#include <linux/slab.h> 11#include <linux/slab.h>
10#include <asm/ebcdic.h> 12
11#include "qeth_l3.h" 13#include "qeth_l3.h"
12 14
13#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ 15#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
@@ -173,32 +175,33 @@ out:
173static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, 175static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
174 qeth_l3_dev_fake_broadcast_store); 176 qeth_l3_dev_fake_broadcast_store);
175 177
176static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, 178static ssize_t qeth_l3_dev_broadcast_mode_show(struct device *dev,
177 struct device_attribute *attr, char *buf) 179 struct device_attribute *attr, char *buf)
178{ 180{
179 struct qeth_card *card = dev_get_drvdata(dev); 181 struct qeth_card *card = dev_get_drvdata(dev);
180 182
181 if (!card) 183 if (!card)
182 return -EINVAL; 184 return -EINVAL;
183 185
184 return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0); 186 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
187 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
188 return sprintf(buf, "n/a\n");
189
190 return sprintf(buf, "%s\n", (card->options.broadcast_mode ==
191 QETH_TR_BROADCAST_ALLRINGS)?
192 "all rings":"local");
185} 193}
186 194
187static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, 195static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev,
188 struct device_attribute *attr, const char *buf, size_t count) 196 struct device_attribute *attr, const char *buf, size_t count)
189{ 197{
190 struct qeth_card *card = dev_get_drvdata(dev); 198 struct qeth_card *card = dev_get_drvdata(dev);
199 char *tmp;
191 int rc = 0; 200 int rc = 0;
192 unsigned long i;
193 201
194 if (!card) 202 if (!card)
195 return -EINVAL; 203 return -EINVAL;
196 204
197 if (card->info.type != QETH_CARD_TYPE_IQD)
198 return -EPERM;
199 if (card->options.cq == QETH_CQ_ENABLED)
200 return -EPERM;
201
202 mutex_lock(&card->conf_mutex); 205 mutex_lock(&card->conf_mutex);
203 if ((card->state != CARD_STATE_DOWN) && 206 if ((card->state != CARD_STATE_DOWN) &&
204 (card->state != CARD_STATE_RECOVER)) { 207 (card->state != CARD_STATE_RECOVER)) {
@@ -206,148 +209,151 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
206 goto out; 209 goto out;
207 } 210 }
208 211
209 rc = strict_strtoul(buf, 16, &i); 212 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
210 if (rc) { 213 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
211 rc = -EINVAL; 214 rc = -EINVAL;
212 goto out; 215 goto out;
213 } 216 }
214 switch (i) { 217
215 case 0: 218 tmp = strsep((char **) &buf, "\n");
216 card->options.sniffer = i; 219
217 break; 220 if (!strcmp(tmp, "local"))
218 case 1: 221 card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL;
219 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); 222 else if (!strcmp(tmp, "all_rings"))
220 if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) { 223 card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
221 card->options.sniffer = i; 224 else
222 if (card->qdio.init_pool.buf_count !=
223 QETH_IN_BUF_COUNT_MAX)
224 qeth_realloc_buffer_pool(card,
225 QETH_IN_BUF_COUNT_MAX);
226 } else
227 rc = -EPERM;
228 break;
229 default:
230 rc = -EINVAL; 225 rc = -EINVAL;
231 }
232out: 226out:
233 mutex_unlock(&card->conf_mutex); 227 mutex_unlock(&card->conf_mutex);
234 return rc ? rc : count; 228 return rc ? rc : count;
235} 229}
236 230
237static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, 231static DEVICE_ATTR(broadcast_mode, 0644, qeth_l3_dev_broadcast_mode_show,
238 qeth_l3_dev_sniffer_store); 232 qeth_l3_dev_broadcast_mode_store);
239 233
240 234static ssize_t qeth_l3_dev_canonical_macaddr_show(struct device *dev,
241static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, 235 struct device_attribute *attr, char *buf)
242 struct device_attribute *attr, char *buf)
243{ 236{
244 struct qeth_card *card = dev_get_drvdata(dev); 237 struct qeth_card *card = dev_get_drvdata(dev);
245 char tmp_hsuid[9];
246 238
247 if (!card) 239 if (!card)
248 return -EINVAL; 240 return -EINVAL;
249 241
250 if (card->info.type != QETH_CARD_TYPE_IQD) 242 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
251 return -EPERM; 243 (card->info.link_type == QETH_LINK_TYPE_LANE_TR)))
252 244 return sprintf(buf, "n/a\n");
253 if (card->state == CARD_STATE_DOWN)
254 return -EPERM;
255 245
256 memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); 246 return sprintf(buf, "%i\n", (card->options.macaddr_mode ==
257 EBCASC(tmp_hsuid, 8); 247 QETH_TR_MACADDR_CANONICAL)? 1:0);
258 return sprintf(buf, "%s\n", tmp_hsuid);
259} 248}
260 249
261static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, 250static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev,
262 struct device_attribute *attr, const char *buf, size_t count) 251 struct device_attribute *attr, const char *buf, size_t count)
263{ 252{
264 struct qeth_card *card = dev_get_drvdata(dev); 253 struct qeth_card *card = dev_get_drvdata(dev);
265 struct qeth_ipaddr *addr;
266 char *tmp; 254 char *tmp;
267 int i; 255 int i, rc = 0;
268 256
269 if (!card) 257 if (!card)
270 return -EINVAL; 258 return -EINVAL;
271 259
272 if (card->info.type != QETH_CARD_TYPE_IQD) 260 mutex_lock(&card->conf_mutex);
273 return -EPERM; 261 if ((card->state != CARD_STATE_DOWN) &&
274 if (card->state != CARD_STATE_DOWN && 262 (card->state != CARD_STATE_RECOVER)) {
275 card->state != CARD_STATE_RECOVER) 263 rc = -EPERM;
276 return -EPERM; 264 goto out;
277 if (card->options.sniffer) 265 }
278 return -EPERM; 266
279 if (card->options.cq == QETH_CQ_NOTAVAILABLE) 267 if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) ||
280 return -EPERM; 268 (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) {
269 rc = -EINVAL;
270 goto out;
271 }
281 272
282 tmp = strsep((char **)&buf, "\n"); 273 i = simple_strtoul(buf, &tmp, 16);
283 if (strlen(tmp) > 8) 274 if ((i == 0) || (i == 1))
275 card->options.macaddr_mode = i?
276 QETH_TR_MACADDR_CANONICAL :
277 QETH_TR_MACADDR_NONCANONICAL;
278 else
279 rc = -EINVAL;
280out:
281 mutex_unlock(&card->conf_mutex);
282 return rc ? rc : count;
283}
284
285static DEVICE_ATTR(canonical_macaddr, 0644, qeth_l3_dev_canonical_macaddr_show,
286 qeth_l3_dev_canonical_macaddr_store);
287
288static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
289 struct device_attribute *attr, char *buf)
290{
291 struct qeth_card *card = dev_get_drvdata(dev);
292
293 if (!card)
284 return -EINVAL; 294 return -EINVAL;
285 295
286 if (card->options.hsuid[0]) { 296 return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
287 /* delete old ip address */ 297}
288 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
289 if (addr != NULL) {
290 addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
291 addr->u.a6.addr.s6_addr32[1] = 0x00000000;
292 for (i = 8; i < 16; i++)
293 addr->u.a6.addr.s6_addr[i] =
294 card->options.hsuid[i - 8];
295 addr->u.a6.pfxlen = 0;
296 addr->type = QETH_IP_TYPE_NORMAL;
297 } else
298 return -ENOMEM;
299 if (!qeth_l3_delete_ip(card, addr))
300 kfree(addr);
301 qeth_l3_set_ip_addr_list(card);
302 }
303 298
304 if (strlen(tmp) == 0) { 299static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
305 /* delete ip address only */ 300 struct device_attribute *attr, const char *buf, size_t count)
306 card->options.hsuid[0] = '\0'; 301{
307 if (card->dev) 302 struct qeth_card *card = dev_get_drvdata(dev);
308 memcpy(card->dev->perm_addr, card->options.hsuid, 9); 303 int rc = 0;
309 qeth_configure_cq(card, QETH_CQ_DISABLED); 304 unsigned long i;
310 return count;
311 }
312 305
313 if (qeth_configure_cq(card, QETH_CQ_ENABLED)) 306 if (!card)
307 return -EINVAL;
308
309 if (card->info.type != QETH_CARD_TYPE_IQD)
314 return -EPERM; 310 return -EPERM;
315 311
316 for (i = 0; i < 8; i++) 312 mutex_lock(&card->conf_mutex);
317 card->options.hsuid[i] = ' '; 313 if ((card->state != CARD_STATE_DOWN) &&
318 card->options.hsuid[8] = '\0'; 314 (card->state != CARD_STATE_RECOVER)) {
319 strncpy(card->options.hsuid, tmp, strlen(tmp)); 315 rc = -EPERM;
320 ASCEBC(card->options.hsuid, 8); 316 goto out;
321 if (card->dev) 317 }
322 memcpy(card->dev->perm_addr, card->options.hsuid, 9);
323
324 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
325 if (addr != NULL) {
326 addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
327 addr->u.a6.addr.s6_addr32[1] = 0x00000000;
328 for (i = 8; i < 16; i++)
329 addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
330 addr->u.a6.pfxlen = 0;
331 addr->type = QETH_IP_TYPE_NORMAL;
332 } else
333 return -ENOMEM;
334 if (!qeth_l3_add_ip(card, addr))
335 kfree(addr);
336 qeth_l3_set_ip_addr_list(card);
337 318
338 return count; 319 rc = strict_strtoul(buf, 16, &i);
320 if (rc) {
321 rc = -EINVAL;
322 goto out;
323 }
324 switch (i) {
325 case 0:
326 card->options.sniffer = i;
327 break;
328 case 1:
329 qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
330 if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
331 card->options.sniffer = i;
332 if (card->qdio.init_pool.buf_count !=
333 QETH_IN_BUF_COUNT_MAX)
334 qeth_realloc_buffer_pool(card,
335 QETH_IN_BUF_COUNT_MAX);
336 break;
337 } else
338 rc = -EPERM;
339 default: /* fall through */
340 rc = -EINVAL;
341 }
342out:
343 mutex_unlock(&card->conf_mutex);
344 return rc ? rc : count;
339} 345}
340 346
341static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, 347static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
342 qeth_l3_dev_hsuid_store); 348 qeth_l3_dev_sniffer_store);
343
344 349
345static struct attribute *qeth_l3_device_attrs[] = { 350static struct attribute *qeth_l3_device_attrs[] = {
346 &dev_attr_route4.attr, 351 &dev_attr_route4.attr,
347 &dev_attr_route6.attr, 352 &dev_attr_route6.attr,
348 &dev_attr_fake_broadcast.attr, 353 &dev_attr_fake_broadcast.attr,
354 &dev_attr_broadcast_mode.attr,
355 &dev_attr_canonical_macaddr.attr,
349 &dev_attr_sniffer.attr, 356 &dev_attr_sniffer.attr,
350 &dev_attr_hsuid.attr,
351 NULL, 357 NULL,
352}; 358};
353 359
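
Note: the qeth_l3_sys.c attribute handlers above share one store skeleton: take conf_mutex, reject writes unless the card is DOWN or in RECOVER, parse the input, and return rc ? rc : count. A self-contained sketch of that skeleton follows; the ex_* struct and names are hypothetical stand-ins, not the driver's real types.

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/string.h>

#define EX_DOWN		0
#define EX_RECOVER	1

struct ex_card {			/* hypothetical stand-in for qeth_card */
	struct mutex conf_mutex;
	int state;
	int mode;
};

static ssize_t ex_mode_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ex_card *card = dev_get_drvdata(dev);
	char *tmp;
	int rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	if ((card->state != EX_DOWN) && (card->state != EX_RECOVER)) {
		rc = -EPERM;		/* no reconfiguration while online */
		goto out;
	}
	tmp = strsep((char **)&buf, "\n");
	if (!strcmp(tmp, "local"))
		card->mode = 0;
	else if (!strcmp(tmp, "all_rings"))
		card->mode = 1;
	else
		rc = -EINVAL;
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;		/* sysfs: consume the write on success */
}
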
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index d8f990b6b33..207b7d74244 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -157,7 +157,7 @@ static int smsg_pm_restore_thaw(struct device *dev)
157#ifdef CONFIG_PM_DEBUG 157#ifdef CONFIG_PM_DEBUG
158 printk(KERN_WARNING "smsg_pm_restore_thaw\n"); 158 printk(KERN_WARNING "smsg_pm_restore_thaw\n");
159#endif 159#endif
160 if (smsg_path && !iucv_path_connected) { 160 if (smsg_path && iucv_path_connected) {
161 memset(smsg_path, 0, sizeof(*smsg_path)); 161 memset(smsg_path, 0, sizeof(*smsg_path));
162 smsg_path->msglim = 255; 162 smsg_path->msglim = 255;
163 smsg_path->flags = 0; 163 smsg_path->flags = 0;
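
Note: the smsgiucv.c fix above inverts the restore/thaw guard; the IUCV path is rebuilt only if it was actually connected before the suspend (smsg_path && iucv_path_connected), where the old negated test could never fire for a connected path. A hedged sketch of the guard shape, with hypothetical names in place of the IUCV specifics:

#include <linux/device.h>
#include <linux/string.h>

struct ex_path { int msglim; int flags; };	/* hypothetical path object */
static struct ex_path *ex_path;
static int ex_path_connected;			/* set once the path is up */

static void ex_reconnect(struct ex_path *p)	/* hypothetical connect step */
{
	p->msglim = 255;
}

static int ex_pm_restore(struct device *dev)
{
	if (ex_path && ex_path_connected) {	/* only re-establish what existed */
		memset(ex_path, 0, sizeof(*ex_path));
		ex_reconnect(ex_path);
	}
	return 0;
}
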
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
index 45bc925928c..149a1151608 100644
--- a/drivers/s390/net/smsgiucv.h
+++ b/drivers/s390/net/smsgiucv.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * IUCV special message driver 2 * IUCV special message driver
3 * 3 *
4 * Copyright IBM Corp. 2003 4 * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */ 6 */
7 7
diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c
index 32515a201bb..4d2ea400042 100644
--- a/drivers/s390/net/smsgiucv_app.c
+++ b/drivers/s390/net/smsgiucv_app.c
@@ -168,7 +168,7 @@ static int __init smsgiucv_app_init(void)
168 rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); 168 rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
169 if (rc) { 169 if (rc) {
170 kfree(smsg_app_dev); 170 kfree(smsg_app_dev);
171 goto fail; 171 goto fail_put_driver;
172 } 172 }
173 smsg_app_dev->bus = &iucv_bus; 173 smsg_app_dev->bus = &iucv_bus;
174 smsg_app_dev->parent = iucv_root; 174 smsg_app_dev->parent = iucv_root;
@@ -177,7 +177,7 @@ static int __init smsgiucv_app_init(void)
177 rc = device_register(smsg_app_dev); 177 rc = device_register(smsg_app_dev);
178 if (rc) { 178 if (rc) {
179 put_device(smsg_app_dev); 179 put_device(smsg_app_dev);
180 goto fail; 180 goto fail_put_driver;
181 } 181 }
182 182
183 /* convert sender to uppercase characters */ 183 /* convert sender to uppercase characters */
@@ -191,11 +191,12 @@ static int __init smsgiucv_app_init(void)
191 rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); 191 rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
192 if (rc) { 192 if (rc) {
193 device_unregister(smsg_app_dev); 193 device_unregister(smsg_app_dev);
194 goto fail; 194 goto fail_put_driver;
195 } 195 }
196 196
197 rc = 0; 197 rc = 0;
198fail: 198fail_put_driver:
199 put_driver(smsgiucv_drv);
199 return rc; 200 return rc;
200} 201}
201module_init(smsgiucv_app_init); 202module_init(smsgiucv_app_init);
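
Note: the smsgiucv_app_init() hunks above rename the error label to fail_put_driver and add a put_driver() that every exit path now reaches, success included, balancing the reference taken on the smsgiucv driver earlier in the function. A sketch of that init shape with hypothetical names (get_driver()/put_driver() are the driver-model refcount helpers of this kernel generation):

#include <linux/device.h>
#include <linux/init.h>

static struct device_driver ex_peer_driver;	/* hypothetical peer driver */

static int ex_register_stuff(void)		/* hypothetical setup step */
{
	return 0;
}

static int __init ex_init(void)
{
	struct device_driver *drv;
	int rc;

	drv = get_driver(&ex_peer_driver);	/* pin the peer driver */
	if (!drv)
		return -ENODEV;

	rc = ex_register_stuff();
	if (rc)
		goto fail_put_driver;

	rc = 0;
fail_put_driver:
	put_driver(drv);	/* balanced on success and failure alike */
	return rc;
}
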
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index f6adde44f22..645b0fcbb37 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Module interface and handling of zfcp data structures. 4 * Module interface and handling of zfcp data structures.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9/* 9/*
@@ -31,7 +31,6 @@
31#include <linux/miscdevice.h> 31#include <linux/miscdevice.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/module.h>
35#include "zfcp_ext.h" 34#include "zfcp_ext.h"
36#include "zfcp_fc.h" 35#include "zfcp_fc.h"
37#include "zfcp_reqlist.h" 36#include "zfcp_reqlist.h"
@@ -519,7 +518,6 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
519 518
520 rwlock_init(&port->unit_list_lock); 519 rwlock_init(&port->unit_list_lock);
521 INIT_LIST_HEAD(&port->unit_list); 520 INIT_LIST_HEAD(&port->unit_list);
522 atomic_set(&port->units, 0);
523 521
524 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup); 522 INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
525 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work); 523 INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index f2dd3a0a39e..e8b7cee6204 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,13 +3,12 @@
3 * 3 *
4 * Registration and callback for the s390 common I/O layer. 4 * Registration and callback for the s390 common I/O layer.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/module.h>
13#include "zfcp_ext.h" 12#include "zfcp_ext.h"
14#include "zfcp_reqlist.h" 13#include "zfcp_reqlist.h"
15 14
@@ -39,25 +38,19 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
39 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags); 38 spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
40} 39}
41 40
42/** 41static int zfcp_ccw_activate(struct ccw_device *cdev)
43 * zfcp_ccw_activate - activate adapter and wait for it to finish 42
44 * @cdev: pointer to belonging ccw device
45 * @clear: Status flags to clear.
46 * @tag: s390dbf trace record tag
47 */
48static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
49{ 43{
50 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); 44 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
51 45
52 if (!adapter) 46 if (!adapter)
53 return 0; 47 return 0;
54 48
55 zfcp_erp_clear_adapter_status(adapter, clear);
56 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING); 49 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
57 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 50 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
58 tag); 51 "ccresu2");
59 zfcp_erp_wait(adapter); 52 zfcp_erp_wait(adapter);
60 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */ 53 flush_work(&adapter->scan_work);
61 54
62 zfcp_ccw_adapter_put(adapter); 55 zfcp_ccw_adapter_put(adapter);
63 56
@@ -170,34 +163,26 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
170 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list)); 163 BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
171 adapter->req_no = 0; 164 adapter->req_no = 0;
172 165
173 zfcp_ccw_activate(cdev, 0, "ccsonl1"); 166 zfcp_ccw_activate(cdev);
174 /* scan for remote ports
175 either at the end of any successful adapter recovery
176 or only after the adapter recovery for setting a device online */
177 zfcp_fc_inverse_conditional_port_scan(adapter);
178 flush_work(&adapter->scan_work); /* ok to call even if nothing queued */
179 zfcp_ccw_adapter_put(adapter); 167 zfcp_ccw_adapter_put(adapter);
180 return 0; 168 return 0;
181} 169}
182 170
183/** 171/**
184 * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish 172 * zfcp_ccw_set_offline - set_offline function of zfcp driver
185 * @cdev: pointer to belonging ccw device 173 * @cdev: pointer to belonging ccw device
186 * @set: Status flags to set.
187 * @tag: s390dbf trace record tag
188 * 174 *
189 * This function gets called by the common i/o layer and sets an adapter 175 * This function gets called by the common i/o layer and sets an adapter
190 * into state offline. 176 * into state offline.
191 */ 177 */
192static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag) 178static int zfcp_ccw_set_offline(struct ccw_device *cdev)
193{ 179{
194 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev); 180 struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
195 181
196 if (!adapter) 182 if (!adapter)
197 return 0; 183 return 0;
198 184
199 zfcp_erp_set_adapter_status(adapter, set); 185 zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
200 zfcp_erp_adapter_shutdown(adapter, 0, tag);
201 zfcp_erp_wait(adapter); 186 zfcp_erp_wait(adapter);
202 187
203 zfcp_ccw_adapter_put(adapter); 188 zfcp_ccw_adapter_put(adapter);
@@ -205,18 +190,6 @@ static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
205} 190}
206 191
207/** 192/**
208 * zfcp_ccw_set_offline - set_offline function of zfcp driver
209 * @cdev: pointer to belonging ccw device
210 *
211 * This function gets called by the common i/o layer and sets an adapter
212 * into state offline.
213 */
214static int zfcp_ccw_set_offline(struct ccw_device *cdev)
215{
216 return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
217}
218
219/**
220 * zfcp_ccw_notify - ccw notify function 193 * zfcp_ccw_notify - ccw notify function
221 * @cdev: pointer to belonging ccw device 194 * @cdev: pointer to belonging ccw device
222 * @event: indicates if adapter was detached or attached 195 * @event: indicates if adapter was detached or attached
@@ -233,11 +206,6 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
233 206
234 switch (event) { 207 switch (event) {
235 case CIO_GONE: 208 case CIO_GONE:
236 if (atomic_read(&adapter->status) &
237 ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
238 zfcp_dbf_hba_basic("ccnigo1", adapter);
239 break;
240 }
241 dev_warn(&cdev->dev, "The FCP device has been detached\n"); 209 dev_warn(&cdev->dev, "The FCP device has been detached\n");
242 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1"); 210 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
243 break; 211 break;
@@ -247,11 +215,6 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
247 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2"); 215 zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
248 break; 216 break;
249 case CIO_OPER: 217 case CIO_OPER:
250 if (atomic_read(&adapter->status) &
251 ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
252 zfcp_dbf_hba_basic("ccniop1", adapter);
253 break;
254 }
255 dev_info(&cdev->dev, "The FCP device is operational again\n"); 218 dev_info(&cdev->dev, "The FCP device is operational again\n");
256 zfcp_erp_set_adapter_status(adapter, 219 zfcp_erp_set_adapter_status(adapter,
257 ZFCP_STATUS_COMMON_RUNNING); 220 ZFCP_STATUS_COMMON_RUNNING);
@@ -287,28 +250,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
287 zfcp_ccw_adapter_put(adapter); 250 zfcp_ccw_adapter_put(adapter);
288} 251}
289 252
290static int zfcp_ccw_suspend(struct ccw_device *cdev)
291{
292 zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
293 return 0;
294}
295
296static int zfcp_ccw_thaw(struct ccw_device *cdev)
297{
298 /* trace records for thaw and final shutdown during suspend
299 can only be found in system dump until the end of suspend
300 but not after resume because it's based on the memory image
301 right after the very first suspend (freeze) callback */
302 zfcp_ccw_activate(cdev, 0, "ccthaw1");
303 return 0;
304}
305
306static int zfcp_ccw_resume(struct ccw_device *cdev)
307{
308 zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
309 return 0;
310}
311
312struct ccw_driver zfcp_ccw_driver = { 253struct ccw_driver zfcp_ccw_driver = {
313 .driver = { 254 .driver = {
314 .owner = THIS_MODULE, 255 .owner = THIS_MODULE,
@@ -321,7 +262,7 @@ struct ccw_driver zfcp_ccw_driver = {
321 .set_offline = zfcp_ccw_set_offline, 262 .set_offline = zfcp_ccw_set_offline,
322 .notify = zfcp_ccw_notify, 263 .notify = zfcp_ccw_notify,
323 .shutdown = zfcp_ccw_shutdown, 264 .shutdown = zfcp_ccw_shutdown,
324 .freeze = zfcp_ccw_suspend, 265 .freeze = zfcp_ccw_set_offline,
325 .thaw = zfcp_ccw_thaw, 266 .thaw = zfcp_ccw_activate,
326 .restore = zfcp_ccw_resume, 267 .restore = zfcp_ccw_activate,
327}; 268};
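
Note: with the suspend-specific wrappers gone, the zfcp_ccw_driver hunk above points the PM entries of struct ccw_driver straight at the existing helpers: .freeze reuses zfcp_ccw_set_offline() and both .thaw and .restore reuse zfcp_ccw_activate(), which works because all of them take a struct ccw_device * and return int. A partial sketch of that wiring (the ids/probe fields are omitted and the ex_* helpers are placeholders):

#include <linux/module.h>
#include <asm/ccwdev.h>

static int ex_up(struct ccw_device *cdev)	/* stands in for activate */
{
	return 0;
}

static int ex_down(struct ccw_device *cdev)	/* stands in for set_offline */
{
	return 0;
}

struct ccw_driver ex_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = "ex",
	},
	.set_online  = ex_up,
	.set_offline = ex_down,
	.freeze      = ex_down,		/* suspend: take the device offline */
	.thaw        = ex_up,		/* resume: bring it back online */
	.restore     = ex_up,
};
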
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 49b82e46629..303dde09d29 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -5,13 +5,12 @@
5 * Access Control Lists / Control File Data Channel; 5 * Access Control Lists / Control File Data Channel;
6 * handling of response code and states for ports and LUNs. 6 * handling of response code and states for ports and LUNs.
7 * 7 *
8 * Copyright IBM Corp. 2008, 2010 8 * Copyright IBM Corporation 2008, 2010
9 */ 9 */
10 10
11#define KMSG_COMPONENT "zfcp" 11#define KMSG_COMPONENT "zfcp"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 13
14#include <linux/compat.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/types.h> 15#include <linux/types.h>
17#include <linux/miscdevice.h> 16#include <linux/miscdevice.h>
@@ -293,7 +292,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
293 } 292 }
294 read_unlock_irqrestore(&adapter->port_list_lock, flags); 293 read_unlock_irqrestore(&adapter->port_list_lock, flags);
295 294
296 shost_for_each_device(sdev, adapter->scsi_host) { 295 shost_for_each_device(sdev, port->adapter->scsi_host) {
297 zfcp_sdev = sdev_to_zfcp(sdev); 296 zfcp_sdev = sdev_to_zfcp(sdev);
298 status = atomic_read(&zfcp_sdev->status); 297 status = atomic_read(&zfcp_sdev->status);
299 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) || 298 if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index e1a8cc2526e..96d1462e0bf 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,13 +3,12 @@
3 * 3 *
4 * Debug traces for zfcp. 4 * Debug traces for zfcp.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11 11
12#include <linux/module.h>
13#include <linux/ctype.h> 12#include <linux/ctype.h>
14#include <linux/slab.h> 13#include <linux/slab.h>
15#include <asm/debug.h> 14#include <asm/debug.h>
@@ -164,62 +163,6 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
164 spin_unlock_irqrestore(&dbf->hba_lock, flags); 163 spin_unlock_irqrestore(&dbf->hba_lock, flags);
165} 164}
166 165
167/**
168 * zfcp_dbf_hba_def_err - trace event for deferred error messages
169 * @adapter: pointer to struct zfcp_adapter
170 * @req_id: request id which caused the deferred error message
171 * @scount: number of sbals incl. the signaling sbal
172 * @pl: array of all involved sbals
173 */
174void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
175 void **pl)
176{
177 struct zfcp_dbf *dbf = adapter->dbf;
178 struct zfcp_dbf_pay *payload = &dbf->pay_buf;
179 unsigned long flags;
180 u16 length;
181
182 if (!pl)
183 return;
184
185 spin_lock_irqsave(&dbf->pay_lock, flags);
186 memset(payload, 0, sizeof(*payload));
187
188 memcpy(payload->area, "def_err", 7);
189 payload->fsf_req_id = req_id;
190 payload->counter = 0;
191 length = min((u16)sizeof(struct qdio_buffer),
192 (u16)ZFCP_DBF_PAY_MAX_REC);
193
194 while (payload->counter < scount && (char *)pl[payload->counter]) {
195 memcpy(payload->data, (char *)pl[payload->counter], length);
196 debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
197 payload->counter++;
198 }
199
200 spin_unlock_irqrestore(&dbf->pay_lock, flags);
201}
202
203/**
204 * zfcp_dbf_hba_basic - trace event for basic adapter events
205 * @adapter: pointer to struct zfcp_adapter
206 */
207void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
208{
209 struct zfcp_dbf *dbf = adapter->dbf;
210 struct zfcp_dbf_hba *rec = &dbf->hba_buf;
211 unsigned long flags;
212
213 spin_lock_irqsave(&dbf->hba_lock, flags);
214 memset(rec, 0, sizeof(*rec));
215
216 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
217 rec->id = ZFCP_DBF_HBA_BASIC;
218
219 debug_event(dbf->hba, 1, rec, sizeof(*rec));
220 spin_unlock_irqrestore(&dbf->hba_lock, flags);
221}
222
223static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, 166static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
224 struct zfcp_adapter *adapter, 167 struct zfcp_adapter *adapter,
225 struct zfcp_port *port, 168 struct zfcp_port *port,
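
Note: both trace helpers removed above (zfcp_dbf_hba_def_err and zfcp_dbf_hba_basic) follow the standard s390 debug-feature pattern: take the per-area lock, rebuild the preallocated record buffer, then emit it with debug_event() from <asm/debug.h>. A minimal sketch of that pattern; the ex_rec layout and all ex_* names are hypothetical:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/debug.h>

struct ex_rec {			/* hypothetical trace record layout */
	char tag[7];
	u8 id;
};

static debug_info_t *ex_dbf;	/* registered elsewhere via debug_register() */
static struct ex_rec ex_buf;	/* one shared record buffer, hence the lock */
static DEFINE_SPINLOCK(ex_lock);

static void ex_trace(char *tag, u8 id)	/* tag must hold >= 7 bytes */
{
	unsigned long flags;

	spin_lock_irqsave(&ex_lock, flags);
	memset(&ex_buf, 0, sizeof(ex_buf));
	memcpy(ex_buf.tag, tag, sizeof(ex_buf.tag));
	ex_buf.id = id;
	debug_event(ex_dbf, 1, &ex_buf, sizeof(ex_buf));
	spin_unlock_irqrestore(&ex_lock, flags);
}
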
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 3ac7a4b30dd..714f087eb7a 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -154,7 +154,6 @@ enum zfcp_dbf_hba_id {
154 ZFCP_DBF_HBA_RES = 1, 154 ZFCP_DBF_HBA_RES = 1,
155 ZFCP_DBF_HBA_USS = 2, 155 ZFCP_DBF_HBA_USS = 2,
156 ZFCP_DBF_HBA_BIT = 3, 156 ZFCP_DBF_HBA_BIT = 3,
157 ZFCP_DBF_HBA_BASIC = 4,
158}; 157};
159 158
160/** 159/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 1305955cbf5..527ba48eea5 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Global definitions for the zfcp device driver. 4 * Global definitions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#ifndef ZFCP_DEF_H 9#ifndef ZFCP_DEF_H
@@ -72,12 +72,10 @@ struct zfcp_reqlist;
72#define ZFCP_STATUS_COMMON_NOESC 0x00200000 72#define ZFCP_STATUS_COMMON_NOESC 0x00200000
73 73
74/* adapter status */ 74/* adapter status */
75#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001
76#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 75#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
77#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 76#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
78#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 77#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
79#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010 78#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
80#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
81#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100 79#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
82#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200 80#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
83#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400 81#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
@@ -205,7 +203,6 @@ struct zfcp_port {
205 struct zfcp_adapter *adapter; /* adapter used to access port */ 203 struct zfcp_adapter *adapter; /* adapter used to access port */
206 struct list_head unit_list; /* head of logical unit list */ 204 struct list_head unit_list; /* head of logical unit list */
207 rwlock_t unit_list_lock; /* unit list lock */ 205 rwlock_t unit_list_lock; /* unit list lock */
208 atomic_t units; /* zfcp_unit count */
209 atomic_t status; /* status of this remote port */ 206 atomic_t status; /* status of this remote port */
210 u64 wwnn; /* WWNN if known */ 207 u64 wwnn; /* WWNN if known */
211 u64 wwpn; /* WWPN */ 208 u64 wwpn; /* WWPN */
@@ -317,10 +314,4 @@ struct zfcp_fsf_req {
317 void (*handler)(struct zfcp_fsf_req *); 314 void (*handler)(struct zfcp_fsf_req *);
318}; 315};
319 316
320static inline
321int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
322{
323 return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
324}
325
326#endif /* ZFCP_DEF_H */ 317#endif /* ZFCP_DEF_H */
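
Note: the removed zfcp_adapter_multi_buffer_active() above shows the usual idiom for testing one bit of an atomic status word; the same shape fits any of the remaining ZFCP_STATUS_ADAPTER_* flags. A sketch with a hypothetical struct, mirroring the QDIOUP flag value defined in the hunk above:

#include <linux/atomic.h>

#define EX_STATUS_QDIOUP	0x00000002  /* mirrors ZFCP_STATUS_ADAPTER_QDIOUP */

struct ex_adapter {
	atomic_t status;
};

static inline int ex_adapter_qdio_up(struct ex_adapter *adapter)
{
	/* snapshot the word, mask out the bit of interest */
	return atomic_read(&adapter->status) & EX_STATUS_QDIOUP;
}
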
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 4133ab6e20f..e1b4f800e22 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Error Recovery Procedures (ERP). 4 * Error Recovery Procedures (ERP).
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -1230,7 +1230,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1230 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 1230 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
1231 if (result == ZFCP_ERP_SUCCEEDED) { 1231 if (result == ZFCP_ERP_SUCCEEDED) {
1232 register_service_level(&adapter->service_level); 1232 register_service_level(&adapter->service_level);
1233 zfcp_fc_conditional_port_scan(adapter); 1233 queue_work(adapter->work_queue, &adapter->scan_work);
1234 queue_work(adapter->work_queue, &adapter->ns_up_work); 1234 queue_work(adapter->work_queue, &adapter->ns_up_work);
1235 } else 1235 } else
1236 unregister_service_level(&adapter->service_level); 1236 unregister_service_level(&adapter->service_level);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 1d3dd3f7d69..03627cfd81c 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * External function declarations. 4 * External function declarations.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -53,8 +53,6 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); 53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); 54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
56extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
57extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
58extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); 56extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
59extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); 57extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
60extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -99,8 +97,6 @@ extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
99extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); 97extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
100extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); 98extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
101extern void zfcp_fc_sym_name_update(struct work_struct *); 99extern void zfcp_fc_sym_name_update(struct work_struct *);
102extern void zfcp_fc_conditional_port_scan(struct zfcp_adapter *);
103extern void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *);
104 100
105/* zfcp_fsf.c */ 101/* zfcp_fsf.c */
106extern struct kmem_cache *zfcp_fsf_qtcb_cache; 102extern struct kmem_cache *zfcp_fsf_qtcb_cache;
@@ -161,7 +157,6 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
161extern struct attribute_group zfcp_sysfs_unit_attrs; 157extern struct attribute_group zfcp_sysfs_unit_attrs;
162extern struct attribute_group zfcp_sysfs_adapter_attrs; 158extern struct attribute_group zfcp_sysfs_adapter_attrs;
163extern struct attribute_group zfcp_sysfs_port_attrs; 159extern struct attribute_group zfcp_sysfs_port_attrs;
164extern struct mutex zfcp_sysfs_port_units_mutex;
165extern struct device_attribute *zfcp_sysfs_sdev_attrs[]; 160extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
166extern struct device_attribute *zfcp_sysfs_shost_attrs[]; 161extern struct device_attribute *zfcp_sysfs_shost_attrs[];
167 162
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index ff598cd68b2..297e6b71ce9 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Fibre Channel related functions for the zfcp device driver. 4 * Fibre Channel related functions for the zfcp device driver.
5 * 5 *
6 * Copyright IBM Corp. 2008, 2010 6 * Copyright IBM Corporation 2008, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -26,27 +26,6 @@ static u32 zfcp_fc_rscn_range_mask[] = {
26 [ELS_ADDR_FMT_FAB] = 0x000000, 26 [ELS_ADDR_FMT_FAB] = 0x000000,
27}; 27};
28 28
29static bool no_auto_port_rescan;
30module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
31MODULE_PARM_DESC(no_auto_port_rescan,
32 "no automatic port_rescan (default off)");
33
34void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
35{
36 if (no_auto_port_rescan)
37 return;
38
39 queue_work(adapter->work_queue, &adapter->scan_work);
40}
41
42void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
43{
44 if (!no_auto_port_rescan)
45 return;
46
47 queue_work(adapter->work_queue, &adapter->scan_work);
48}
49
50/** 29/**
51 * zfcp_fc_post_event - post event to userspace via fc_transport 30 * zfcp_fc_post_event - post event to userspace via fc_transport
52 * @work: work struct with enqueued events 31 * @work: work struct with enqueued events
@@ -227,7 +206,7 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
227 zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN, 206 zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
228 *(u32 *)page); 207 *(u32 *)page);
229 } 208 }
230 zfcp_fc_conditional_port_scan(fsf_req->adapter); 209 queue_work(fsf_req->adapter->work_queue, &fsf_req->adapter->scan_work);
231} 210}
232 211
233static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn) 212static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
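
Note: the block removed from zfcp_fc.c above is a gated-work pattern: a bool module parameter decides whether a given event queues the adapter's (already initialized) port-scan work item. A sketch of the gate with hypothetical names; module_param_named() and queue_work() are the stock kernel APIs used in the removed code:

#include <linux/module.h>
#include <linux/workqueue.h>

static bool ex_no_auto_rescan;
module_param_named(no_auto_rescan, ex_no_auto_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_rescan, "suppress automatic rescans (default off)");

static void ex_conditional_rescan(struct workqueue_struct *wq,
				  struct work_struct *scan_work)
{
	if (ex_no_auto_rescan)		/* administrator opted out */
		return;
	queue_work(wq, scan_work);
}
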
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b1d2024ed51..4561f3bf730 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
4 * Fibre Channel related definitions and inline functions for the zfcp 4 * Fibre Channel related definitions and inline functions for the zfcp
5 * device driver 5 * device driver
6 * 6 *
7 * Copyright IBM Corp. 2009 7 * Copyright IBM Corporation 2009
8 */ 8 */
9 9
10#ifndef ZFCP_FC_H 10#ifndef ZFCP_FC_H
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index c96320d79fb..022fb6a8cb8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Implementation of FSF commands. 4 * Implementation of FSF commands.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corporation 2002, 2010
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
219 return; 219 return;
220 } 220 }
221 221
222 zfcp_dbf_hba_fsf_uss("fssrh_4", req); 222 zfcp_dbf_hba_fsf_uss("fssrh_2", req);
223 223
224 switch (sr_buf->status_type) { 224 switch (sr_buf->status_type) {
225 case FSF_STATUS_READ_PORT_CLOSED: 225 case FSF_STATUS_READ_PORT_CLOSED:
@@ -257,7 +257,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED) 257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
258 zfcp_cfdc_adapter_access_changed(adapter); 258 zfcp_cfdc_adapter_access_changed(adapter);
259 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 259 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
260 zfcp_fc_conditional_port_scan(adapter); 260 queue_work(adapter->work_queue, &adapter->scan_work);
261 break; 261 break;
262 case FSF_STATUS_READ_CFDC_UPDATED: 262 case FSF_STATUS_READ_CFDC_UPDATED:
263 zfcp_cfdc_adapter_access_changed(adapter); 263 zfcp_cfdc_adapter_access_changed(adapter);
@@ -437,34 +437,6 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
437 } 437 }
438} 438}
439 439
440#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
441#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
442#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
443#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
444#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
445#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
446#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
447
448static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
449{
450 u32 fdmi_speed = 0;
451 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
452 fdmi_speed |= FC_PORTSPEED_1GBIT;
453 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
454 fdmi_speed |= FC_PORTSPEED_2GBIT;
455 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
456 fdmi_speed |= FC_PORTSPEED_4GBIT;
457 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
458 fdmi_speed |= FC_PORTSPEED_10GBIT;
459 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
460 fdmi_speed |= FC_PORTSPEED_8GBIT;
461 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
462 fdmi_speed |= FC_PORTSPEED_16GBIT;
463 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
464 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
465 return fdmi_speed;
466}
467
468static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 440static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
469{ 441{
470 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; 442 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@@ -484,8 +456,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
484 fc_host_port_name(shost) = nsp->fl_wwpn; 456 fc_host_port_name(shost) = nsp->fl_wwpn;
485 fc_host_node_name(shost) = nsp->fl_wwnn; 457 fc_host_node_name(shost) = nsp->fl_wwnn;
486 fc_host_port_id(shost) = ntoh24(bottom->s_id); 458 fc_host_port_id(shost) = ntoh24(bottom->s_id);
487 fc_host_speed(shost) = 459 fc_host_speed(shost) = bottom->fc_link_speed;
488 zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
489 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 460 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
490 461
491 adapter->hydra_version = bottom->adapter_type; 462 adapter->hydra_version = bottom->adapter_type;
@@ -609,8 +580,7 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
609 } else 580 } else
610 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 581 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
611 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 582 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
612 fc_host_supported_speeds(shost) = 583 fc_host_supported_speeds(shost) = bottom->supported_speed;
613 zfcp_fsf_convert_portspeed(bottom->supported_speed);
614 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, 584 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
615 FC_FC4_LIST_SIZE); 585 FC_FC4_LIST_SIZE);
616 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, 586 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@@ -801,14 +771,12 @@ out:
801static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 771static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
802{ 772{
803 struct scsi_device *sdev = req->data; 773 struct scsi_device *sdev = req->data;
804 struct zfcp_scsi_dev *zfcp_sdev; 774 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
805 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 775 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
806 776
807 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 777 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
808 return; 778 return;
809 779
810 zfcp_sdev = sdev_to_zfcp(sdev);
811
812 switch (req->qtcb->header.fsf_status) { 780 switch (req->qtcb->header.fsf_status) {
813 case FSF_PORT_HANDLE_NOT_VALID: 781 case FSF_PORT_HANDLE_NOT_VALID:
814 if (fsq->word[0] == fsq->word[1]) { 782 if (fsq->word[0] == fsq->word[1]) {
@@ -917,7 +885,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
917 885
918 switch (header->fsf_status) { 886 switch (header->fsf_status) {
919 case FSF_GOOD: 887 case FSF_GOOD:
920 zfcp_dbf_san_res("fsscth2", req); 888 zfcp_dbf_san_res("fsscth1", req);
921 ct->status = 0; 889 ct->status = 0;
922 break; 890 break;
923 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 891 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -968,47 +936,39 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
968 struct scatterlist *sg_resp) 936 struct scatterlist *sg_resp)
969{ 937{
970 struct zfcp_adapter *adapter = req->adapter; 938 struct zfcp_adapter *adapter = req->adapter;
971 struct zfcp_qdio *qdio = adapter->qdio;
972 struct fsf_qtcb *qtcb = req->qtcb;
973 u32 feat = adapter->adapter_features; 939 u32 feat = adapter->adapter_features;
940 int bytes;
974 941
975 if (zfcp_adapter_multi_buffer_active(adapter)) { 942 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
976 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 943 if (!zfcp_qdio_sg_one_sbale(sg_req) ||
977 return -EIO; 944 !zfcp_qdio_sg_one_sbale(sg_resp))
978 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 945 return -EOPNOTSUPP;
979 return -EIO;
980 946
981 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 947 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
982 zfcp_qdio_sbale_count(sg_req)); 948 sg_req, sg_resp);
983 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
984 zfcp_qdio_set_scount(qdio, &req->qdio_req);
985 return 0; 949 return 0;
986 } 950 }
987 951
988 /* use single, unchained SBAL if it can hold the request */ 952 /* use single, unchained SBAL if it can hold the request */
989 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { 953 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
990 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, 954 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
991 sg_req, sg_resp); 955 sg_req, sg_resp);
992 return 0; 956 return 0;
993 } 957 }
994 958
995 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) 959 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
996 return -EOPNOTSUPP; 960 if (bytes <= 0)
997
998 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
999 return -EIO; 961 return -EIO;
962 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
963 req->qtcb->bottom.support.req_buf_length = bytes;
964 zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
1000 965
1001 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); 966 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
1002 967 sg_resp);
1003 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 968 req->qtcb->bottom.support.resp_buf_length = bytes;
1004 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); 969 if (bytes <= 0)
1005
1006 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1007 return -EIO; 970 return -EIO;
1008 971 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
1009 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1010
1011 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1012 972
1013 return 0; 973 return 0;
1014} 974}
@@ -1159,8 +1119,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1159 1119
1160 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1120 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1161 1121
1162 if (!zfcp_adapter_multi_buffer_active(adapter)) 1122 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1163 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1164 1123
1165 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); 1124 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1166 1125
@@ -1771,15 +1730,13 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1771{ 1730{
1772 struct zfcp_adapter *adapter = req->adapter; 1731 struct zfcp_adapter *adapter = req->adapter;
1773 struct scsi_device *sdev = req->data; 1732 struct scsi_device *sdev = req->data;
1774 struct zfcp_scsi_dev *zfcp_sdev; 1733 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1775 struct fsf_qtcb_header *header = &req->qtcb->header; 1734 struct fsf_qtcb_header *header = &req->qtcb->header;
1776 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support; 1735 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1777 1736
1778 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1737 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1779 return; 1738 return;
1780 1739
1781 zfcp_sdev = sdev_to_zfcp(sdev);
1782
1783 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED | 1740 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1784 ZFCP_STATUS_COMMON_ACCESS_BOXED | 1741 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1785 ZFCP_STATUS_LUN_SHARED | 1742 ZFCP_STATUS_LUN_SHARED |
@@ -1890,13 +1847,11 @@ out:
1890static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) 1847static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1891{ 1848{
1892 struct scsi_device *sdev = req->data; 1849 struct scsi_device *sdev = req->data;
1893 struct zfcp_scsi_dev *zfcp_sdev; 1850 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1894 1851
1895 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 1852 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1896 return; 1853 return;
1897 1854
1898 zfcp_sdev = sdev_to_zfcp(sdev);
1899
1900 switch (req->qtcb->header.fsf_status) { 1855 switch (req->qtcb->header.fsf_status) {
1901 case FSF_PORT_HANDLE_NOT_VALID: 1856 case FSF_PORT_HANDLE_NOT_VALID:
1902 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1"); 1857 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1986,7 +1941,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1986{ 1941{
1987 struct fsf_qual_latency_info *lat_in; 1942 struct fsf_qual_latency_info *lat_in;
1988 struct latency_cont *lat = NULL; 1943 struct latency_cont *lat = NULL;
1989 struct zfcp_scsi_dev *zfcp_sdev; 1944 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
 	struct zfcp_blk_drv_data blktrc;
 	int ticks = req->adapter->timer_ticks;
 
@@ -2001,7 +1956,6 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
-		zfcp_sdev = sdev_to_zfcp(scsi->device);
 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
 		blktrc.channel_lat = lat_in->channel_lat * ticks;
 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2039,14 +1993,12 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scmnd = req->data;
 	struct scsi_device *sdev = scmnd->device;
-	struct zfcp_scsi_dev *zfcp_sdev;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct fsf_qtcb_header *header = &req->qtcb->header;
 
 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
 		return;
 
-	zfcp_sdev = sdev_to_zfcp(sdev);
-
 	switch (header->fsf_status) {
 	case FSF_HANDLE_MISMATCH:
 	case FSF_PORT_HANDLE_NOT_VALID:
@@ -2210,7 +2162,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 	struct zfcp_fsf_req *req;
 	struct fcp_cmnd *fcp_cmnd;
 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
-	int retval = -EIO;
+	int real_bytes, retval = -EIO, dix_bytes = 0;
 	struct scsi_device *sdev = scsi_cmnd->device;
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -2255,8 +2207,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
 	}
 
-	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
-		goto failed_scsi_cmnd;
+	zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
 
 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
@@ -2264,22 +2215,18 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
 	if (scsi_prot_sg_count(scsi_cmnd)) {
 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
 				       scsi_prot_sg_count(scsi_cmnd));
-		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-						 scsi_prot_sglist(scsi_cmnd));
-		if (retval)
-			goto failed_scsi_cmnd;
-		io->prot_data_length = zfcp_qdio_real_bytes(
-						scsi_prot_sglist(scsi_cmnd));
+		dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+						    scsi_prot_sglist(scsi_cmnd));
+		io->prot_data_length = dix_bytes;
 	}
 
-	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
-					 scsi_sglist(scsi_cmnd));
-	if (unlikely(retval))
+	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+					     scsi_sglist(scsi_cmnd));
+
+	if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
 		goto failed_scsi_cmnd;
 
 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
-	if (zfcp_adapter_multi_buffer_active(adapter))
-		zfcp_qdio_set_scount(qdio, &req->qdio_req);
 
 	retval = zfcp_fsf_req_send(req);
 	if (unlikely(retval))
@@ -2381,7 +2328,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req = NULL;
 	struct fsf_qtcb_bottom_support *bottom;
-	int retval = -EIO;
+	int retval = -EIO, bytes;
 	u8 direction;
 
 	if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
@@ -2414,17 +2361,13 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
-	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
+	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
 
-	if (retval ||
-	    (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
+	if (bytes != ZFCP_CFDC_MAX_SIZE) {
 		zfcp_fsf_req_free(req);
-		retval = -EIO;
 		goto out;
 	}
-	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
-	if (zfcp_adapter_multi_buffer_active(adapter))
-		zfcp_qdio_set_scount(qdio, &req->qdio_req);
+	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
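The reworked zfcp_fsf_fcp_cmnd() above relies on zfcp_qdio_sbals_from_sg() returning either a mapped byte count or a negative errno, so the payload and protection-data mappings can be issued back to back and checked with a single deferred test. A minimal userspace sketch of that convention follows; map_sg() and all sizes are illustrative stand-ins, not the kernel's:

#include <errno.h>
#include <stdio.h>

/* stand-in for zfcp_qdio_sbals_from_sg(): byte count or -EINVAL */
static int map_sg(int nents, int bytes_per_entry, int budget)
{
	int bytes = nents * bytes_per_entry;

	if (bytes > budget)	/* request would overrun the SBAL chain */
		return -EINVAL;
	return bytes;		/* >= 0: number of bytes mapped */
}

int main(void)
{
	int dix_bytes = 0, real_bytes;

	dix_bytes = map_sg(4, 8, 4096);		/* protection (DIX) data */
	real_bytes = map_sg(4, 512, 4096);	/* payload data */

	/* one deferred check covers both independent mappings */
	if (real_bytes < 0 || dix_bytes < 0) {
		fprintf(stderr, "mapping failed\n");
		return 1;
	}
	printf("mapped %d payload and %d DIX bytes\n", real_bytes, dix_bytes);
	return 0;
}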
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 5e795b86931..db8c85382dc 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #ifndef FSF_H
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 50b5615848f..d9c40ea73ee 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,23 +3,18 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/slab.h>
-#include <linux/module.h>
 #include "zfcp_ext.h"
 #include "zfcp_qdio.h"
 
 #define QBUFF_PER_PAGE		(PAGE_SIZE / sizeof(struct qdio_buffer))
 
-static bool enable_multibuffer;
-module_param_named(datarouter, enable_multibuffer, bool, 0400);
-MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
-
 static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
 {
 	int pos;
@@ -42,11 +37,8 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
 
 	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
 
-	if (qdio_err & QDIO_ERROR_SLSB_STATE) {
+	if (qdio_err & QDIO_ERROR_SLSB_STATE)
 		zfcp_qdio_siosl(adapter);
-		zfcp_erp_adapter_shutdown(adapter, 0, id);
-		return;
-	}
 	zfcp_erp_adapter_reopen(adapter,
 				ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
 				ZFCP_STATUS_COMMON_ERP_FAILED, id);
@@ -101,31 +93,9 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
 			       unsigned long parm)
 {
 	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
-	struct zfcp_adapter *adapter = qdio->adapter;
-	int sbal_no, sbal_idx;
+	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		if (zfcp_adapter_multi_buffer_active(adapter)) {
-			void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
-			struct qdio_buffer_element *sbale;
-			u64 req_id;
-			u8 scount;
-
-			memset(pl, 0,
-			       ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
-			sbale = qdio->res_q[idx]->element;
-			req_id = (u64) sbale->addr;
-			scount = min(sbale->scount + 1,
-				     ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
-				     /* incl. signaling SBAL */
-
-			for (sbal_no = 0; sbal_no < scount; sbal_no++) {
-				sbal_idx = (idx + sbal_no) %
-					QDIO_MAX_BUFFERS_PER_Q;
-				pl[sbal_no] = qdio->res_q[sbal_idx];
-			}
-			zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
-		}
 		zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
 		return;
 	}
@@ -185,7 +155,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 static struct qdio_buffer_element *
 zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 {
-	if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
+	if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL)
 		return zfcp_qdio_sbal_chain(qdio, q_req);
 	q_req->sbale_curr++;
 	return zfcp_qdio_sbale_curr(qdio, q_req);
@@ -197,12 +167,13 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
  * @q_req: pointer to struct zfcp_qdio_req
  * @sg: scatter-gather list
  * @max_sbals: upper bound for number of SBALs to be used
- * Returns: zero or -EINVAL on error
+ * Returns: number of bytes, or error (negative)
  */
 int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 			    struct scatterlist *sg)
 {
 	struct qdio_buffer_element *sbale;
+	int bytes = 0;
 
 	/* set storage-block type for this request */
 	sbale = zfcp_qdio_sbale_req(qdio, q_req);
@@ -216,10 +187,14 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 					     q_req->sbal_number);
 			return -EINVAL;
 		}
+
 		sbale->addr = sg_virt(sg);
 		sbale->length = sg->length;
+
+		bytes += sg->length;
 	}
-	return 0;
+
+	return bytes;
 }
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
@@ -308,8 +283,6 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
 	ASCEBC(id->adapter_name, 8);
 	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
-	if (enable_multibuffer)
-		id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
 	id->no_input_qs = 1;
 	id->no_output_qs = 1;
 	id->input_handler = zfcp_qdio_int_resp;
@@ -405,17 +378,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
 				&qdio->adapter->status);
 
-	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
-		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
-	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
-		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
-	}
-
-	qdio->max_sbale_per_req =
-		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
-		- 2;
 	if (qdio_activate(cdev))
 		goto failed_qdio;
 
@@ -435,11 +397,6 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
-	if (adapter->scsi_host) {
-		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
-		adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
-	}
-
 	return 0;
 
 failed_qdio:
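With the multibuffer path removed, zfcp_qdio_sbale_next() is again bounded by the fixed ZFCP_QDIO_LAST_SBALE_PER_SBAL index: once a request has consumed the last usable element of the current SBAL, it chains to a fresh SBAL. A small standalone model of that walk follows; the starting state and the behavior of the chain step are illustrative, not lifted from the kernel:

#include <stdio.h>

#define LAST_SBALE 14	/* index of last usable SBALE, per zfcp_qdio.h */

struct req {
	int sbal_number;	/* SBALs used so far */
	int sbale_curr;		/* element index within the current SBAL */
};

/* advance to the next usable buffer element, chaining when full */
static void sbale_next(struct req *q)
{
	if (q->sbale_curr == LAST_SBALE) {
		q->sbal_number++;	/* open a new SBAL in the chain */
		q->sbale_curr = 0;	/* continue at element 0 */
	} else {
		q->sbale_curr++;
	}
}

int main(void)
{
	struct req q = { .sbal_number = 1, .sbale_curr = 0 };
	int i;

	for (i = 0; i < 40; i++)
		sbale_next(&q);
	printf("after 40 elements: %d SBALs, index %d\n",
	       q.sbal_number, q.sbale_curr);
	return 0;
}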
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 497cd379b0d..54e22ace012 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -3,7 +3,7 @@
  *
  * Header file for zfcp qdio interface
  *
- * Copyright IBM Corp. 2010
+ * Copyright IBM Corporation 2010
  */
 
 #ifndef ZFCP_QDIO_H
@@ -13,9 +13,20 @@
 
 #define ZFCP_QDIO_SBALE_LEN	PAGE_SIZE
 
+/* DMQ bug workaround: don't use last SBALE */
+#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
+
+/* index of last SBALE (with respect to DMQ bug workaround) */
+#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
+
 /* Max SBALS for chaining */
 #define ZFCP_QDIO_MAX_SBALS_PER_REQ	36
 
+/* max. number of (data buffer) SBALEs in largest SBAL chain
+ * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
+#define ZFCP_QDIO_MAX_SBALES_PER_REQ	\
+	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
+
 /**
  * struct zfcp_qdio - basic qdio data structure
  * @res_q: response queue
@@ -42,8 +53,6 @@ struct zfcp_qdio {
 	atomic_t		req_q_full;
 	wait_queue_head_t	req_q_wq;
 	struct zfcp_adapter	*adapter;
-	u16			max_sbale_per_sbal;
-	u16			max_sbale_per_req;
 };
 
 /**
@@ -146,7 +155,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 {
 	struct qdio_buffer_element *sbale;
 
-	BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
+	BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL);
 	q_req->sbale_curr++;
 	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
 	sbale->addr = data;
@@ -186,10 +195,9 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
 * @q_req: The current zfcp_qdio_req
 */
 static inline
-void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
-				  struct zfcp_qdio_req *q_req)
+void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req)
 {
-	q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
+	q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL;
 }
 
 /**
@@ -220,52 +228,8 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
 {
 	struct qdio_buffer_element *sbale;
 
-	sbale = qdio->req_q[q_req->sbal_first]->element;
+	sbale = &qdio->req_q[q_req->sbal_first]->element[0];
 	sbale->length = count;
 }
 
-/**
- * zfcp_qdio_sbale_count - count sbale used
- * @sg: pointer to struct scatterlist
- */
-static inline
-unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
-{
-	unsigned int count = 0;
-
-	for (; sg; sg = sg_next(sg))
-		count++;
-
-	return count;
-}
-
-/**
- * zfcp_qdio_real_bytes - count bytes used
- * @sg: pointer to struct scatterlist
- */
-static inline
-unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
-{
-	unsigned int real_bytes = 0;
-
-	for (; sg; sg = sg_next(sg))
-		real_bytes += sg->length;
-
-	return real_bytes;
-}
-
-/**
- * zfcp_qdio_set_scount - set SBAL count value
- * @qdio: pointer to struct zfcp_qdio
- * @q_req: The current zfcp_qdio_req
- */
-static inline
-void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
-{
-	struct qdio_buffer_element *sbale;
-
-	sbale = qdio->req_q[q_req->sbal_first]->element;
-	sbale->scount = q_req->sbal_number - 1;
-}
-
 #endif /* ZFCP_QDIO_H */
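The restored macros encode a fixed geometry instead of the per-adapter fields being removed. Assuming the usual s390 value QDIO_MAX_ELEMENTS_PER_BUFFER == 16, this gives 15 usable SBALEs per SBAL (the last element is reserved for the DMQ workaround), a last usable index of 14, and 36 * 15 - 2 = 538 data SBALEs per request after subtracting the two elements of the first SBAL that carry the request ID and the QTCB. A compile-time check of that arithmetic, built as C11:

#include <assert.h>

#define QDIO_MAX_ELEMENTS_PER_BUFFER	16	/* assumed, as on s390 */

#define ZFCP_QDIO_MAX_SBALES_PER_SBAL	(QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
#define ZFCP_QDIO_LAST_SBALE_PER_SBAL	(ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
#define ZFCP_QDIO_MAX_SBALS_PER_REQ	36
#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
	(ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)

static_assert(ZFCP_QDIO_MAX_SBALES_PER_SBAL == 15, "last SBALE reserved");
static_assert(ZFCP_QDIO_LAST_SBALE_PER_SBAL == 14, "zero-based last index");
static_assert(ZFCP_QDIO_MAX_SBALES_PER_REQ == 538, "36 * 15 - 2");

int main(void)
{
	return 0;
}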
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index 7c2c6194dfc..a72d1b730ab 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
  * Data structure and helper functions for tracking pending FSF
  * requests.
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corporation 2009
  */
 
 #ifndef ZFCP_REQLIST_H
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7b31e3f403f..169ba7b7504 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,13 +3,12 @@
  *
  * Interface to Linux SCSI midlayer.
  *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corporation 2002, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/module.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <scsi/fc/fc_fcp.h>
@@ -25,8 +24,11 @@ module_param_named(queue_depth, default_depth, uint, 0600);
 MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
 
 static bool enable_dif;
-module_param_named(dif, enable_dif, bool, 0400);
+
+#ifdef CONFIG_ZFCP_DIF
+module_param_named(dif, enable_dif, bool, 0600);
 MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
+#endif
 
 static bool allow_lun_scan = 1;
 module_param(allow_lun_scan, bool, 0600);
@@ -311,8 +313,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.proc_name		 = "zfcp",
 	.can_queue		 = 4096,
 	.this_id		 = -1,
-	.sg_tablesize		 = 1, /* adjusted later */
-	.max_sectors		 = 8, /* adjusted later */
+	.sg_tablesize		 = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+	.max_sectors		 = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
 	.cmd_per_lun		 = 1,
 	.use_clustering		 = 1,
@@ -670,9 +672,9 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
 	    adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
 		mask |= SHOST_DIX_TYPE1_PROTECTION;
 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
-		shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
-		shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
-		shost->max_sectors = shost->sg_tablesize * 8;
+		shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
+		shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2;
+		shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2;
 	}
 
 	scsi_host_set_prot(shost, mask);
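The host template values follow directly from those header constants: each SBALE addresses at most one page, so assuming 4 KiB pages and 512-byte sectors one SBALE covers 8 sectors, giving max_sectors = 538 * 8 = 4304. When DIX protection is enabled, the same SBALE budget is split evenly between the payload and protection scatter-gather lists, halving both table sizes and max_sectors. A sketch of that derivation, with the page and sector sizes stated as assumptions:

#include <stdio.h>

#define PAGE_SIZE		4096	/* assumed 4 KiB pages */
#define SECTOR_SIZE		512
#define MAX_SBALES_PER_REQ	538	/* 36 * 15 - 2, from zfcp_qdio.h */

int main(void)
{
	int sectors_per_sbale = PAGE_SIZE / SECTOR_SIZE;	/* 8 */
	int max_sectors = MAX_SBALES_PER_REQ * sectors_per_sbale;

	printf("sg_tablesize=%d max_sectors=%d\n",
	       MAX_SBALES_PER_REQ, max_sectors);

	/* with DIX, the budget is shared by data and protection lists */
	printf("DIX: sg_tablesize=%d sg_prot_tablesize=%d max_sectors=%d\n",
	       MAX_SBALES_PER_REQ / 2, MAX_SBALES_PER_REQ / 2,
	       max_sectors / 2);
	return 0;
}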
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 1e0eb089dfb..cdc4ff78a7b 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
  *
  * sysfs attributes.
  *
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corporation 2008, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -227,8 +227,6 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
		     zfcp_sysfs_port_rescan_store);
 
-DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
-
 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
@@ -251,16 +249,6 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
 	else
 		retval = 0;
 
-	mutex_lock(&zfcp_sysfs_port_units_mutex);
-	if (atomic_read(&port->units) > 0) {
-		retval = -EBUSY;
-		mutex_unlock(&zfcp_sysfs_port_units_mutex);
-		goto out;
-	}
-	/* port is about to be removed, so no more unit_add */
-	atomic_set(&port->units, -1);
-	mutex_unlock(&zfcp_sysfs_port_units_mutex);
-
 	write_lock_irq(&adapter->port_list_lock);
 	list_del(&port->list);
 	write_unlock_irq(&adapter->port_list_lock);
@@ -301,14 +289,12 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
 	u64 fcp_lun;
-	int retval;
 
 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
 		return -EINVAL;
 
-	retval = zfcp_unit_add(port, fcp_lun);
-	if (retval)
-		return retval;
+	if (zfcp_unit_add(port, fcp_lun))
+		return -EINVAL;
 
 	return count;
 }
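A sysfs store handler returns either the number of bytes it consumed or a negative errno; with the retval plumbing gone, zfcp_sysfs_unit_add_store() now collapses every zfcp_unit_add() failure to -EINVAL. A userspace sketch of that contract; the parser and unit_add() stub are illustrative (strict_strtoull() is the kernel-side helper used above):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* stand-in for zfcp_unit_add(): nonzero on any failure */
static int unit_add(unsigned long long fcp_lun)
{
	return fcp_lun == 0;	/* pretend LUN 0 already exists */
}

/* models the store: consume all bytes on success, else -errno */
static ssize_t unit_add_store(const char *buf, size_t count)
{
	unsigned long long fcp_lun;
	char *end;

	fcp_lun = strtoull(buf, &end, 0);
	if (end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;

	if (unit_add(fcp_lun))
		return -EINVAL;	/* all internal errors collapse to -EINVAL */

	return count;
}

int main(void)
{
	const char *input = "0x4010403300000000\n";
	ssize_t ret = unit_add_store(input, strlen(input));

	printf("store returned %zd\n", ret);
	return ret < 0;
}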
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1cd2b99ab25..20796ebc33c 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -4,7 +4,7 @@
  * Tracking of manually configured LUNs and helper functions to
  * register the LUNs with the SCSI midlayer.
  *
- * Copyright IBM Corp. 2010
+ * Copyright IBM Corporation 2010
  */
 
 #include "zfcp_def.h"
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
 {
 	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-	atomic_dec(&unit->port->units);
+	put_device(&unit->port->dev);
 	kfree(unit);
 }
 
@@ -119,27 +119,16 @@ static void zfcp_unit_release(struct device *dev)
 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 {
 	struct zfcp_unit *unit;
-	int retval = 0;
-
-	mutex_lock(&zfcp_sysfs_port_units_mutex);
-	if (atomic_read(&port->units) == -1) {
-		/* port is already gone */
-		retval = -ENODEV;
-		goto out;
-	}
 
 	unit = zfcp_unit_find(port, fcp_lun);
 	if (unit) {
 		put_device(&unit->dev);
-		retval = -EEXIST;
-		goto out;
+		return -EEXIST;
 	}
 
 	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
-	if (!unit) {
-		retval = -ENOMEM;
-		goto out;
-	}
+	if (!unit)
+		return -ENOMEM;
 
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
@@ -150,33 +139,28 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 	if (dev_set_name(&unit->dev, "0x%016llx",
			 (unsigned long long) fcp_lun)) {
 		kfree(unit);
-		retval = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
+	get_device(&port->dev);
+
 	if (device_register(&unit->dev)) {
 		put_device(&unit->dev);
-		retval = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
 		device_unregister(&unit->dev);
-		retval = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
-	atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
-
 	write_lock_irq(&port->unit_list_lock);
 	list_add_tail(&unit->list, &port->unit_list);
 	write_unlock_irq(&port->unit_list_lock);
 
 	zfcp_unit_scsi_scan(unit);
 
-out:
-	mutex_unlock(&zfcp_sysfs_port_units_mutex);
-	return retval;
+	return 0;
 }
 
 /**
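The zfcp_unit changes replace the port->units counter and its mutex with plain device reference counting: zfcp_unit_add() pins the parent port with get_device() before registering the unit, and zfcp_unit_release() drops the pin, so the port cannot go away while any unit still points at it. A minimal userspace model of that parent-pinning pattern, with a bare counter standing in for struct device:

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
	const char *name;
	struct object *parent;	/* pinned while this object lives */
};

static void get_object(struct object *obj)
{
	obj->refcount++;
}

static void put_object(struct object *obj)
{
	if (--obj->refcount)
		return;
	printf("releasing %s\n", obj->name);
	if (obj->parent)
		put_object(obj->parent);	/* drop the parent pin */
	free(obj);
}

static struct object *new_object(const char *name, struct object *parent)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return NULL;
	obj->refcount = 1;
	obj->name = name;
	obj->parent = parent;
	if (parent)
		get_object(parent);	/* as zfcp_unit_add() does for the port */
	return obj;
}

int main(void)
{
	struct object *port = new_object("port", NULL);
	struct object *unit = new_object("unit", port);

	put_object(port);	/* port survives: the unit still holds a reference */
	put_object(unit);	/* releases the unit, then the port */
	return 0;
}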