aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390
diff options
context:
space:
mode:
authorRussell King <rmk@dyn-67.arm.linux.org.uk>2006-01-07 09:40:05 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2006-01-07 09:40:05 -0500
commit123656d4cc8c946f578ebd18c2050f5251720428 (patch)
tree3d5432eff034a3b9cfdc98b37e245abe5695342d /drivers/s390
parenta62c80e559809e6c7851ec04d30575e85ad6f6ed (diff)
parent0aec63e67c69545ca757a73a66f5dcf05fa484bf (diff)
Merge with Linus' kernel.
Diffstat (limited to 'drivers/s390')
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/block/Kconfig8
-rw-r--r--drivers/s390/block/dasd.c34
-rw-r--r--drivers/s390/block/dasd_diag.c11
-rw-r--r--drivers/s390/block/dasd_diag.h31
-rw-r--r--drivers/s390/block/dasd_eckd.c9
-rw-r--r--drivers/s390/block/dasd_fba.c6
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_ioctl.c5
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/xpram.c4
-rw-r--r--drivers/s390/char/sclp_cpi.c2
-rw-r--r--drivers/s390/char/sclp_quiesce.c2
-rw-r--r--drivers/s390/char/tape_block.c4
-rw-r--r--drivers/s390/char/vmwatchdog.c2
-rw-r--r--drivers/s390/cio/blacklist.c234
-rw-r--r--drivers/s390/cio/blacklist.h2
-rw-r--r--drivers/s390/cio/ccwgroup.c10
-rw-r--r--drivers/s390/cio/chsc.c473
-rw-r--r--drivers/s390/cio/chsc.h13
-rw-r--r--drivers/s390/cio/cio.c168
-rw-r--r--drivers/s390/cio/cio.h11
-rw-r--r--drivers/s390/cio/cmf.c8
-rw-r--r--drivers/s390/cio/css.c297
-rw-r--r--drivers/s390/cio/css.h43
-rw-r--r--drivers/s390/cio/device.c51
-rw-r--r--drivers/s390/cio/device.h1
-rw-r--r--drivers/s390/cio/device_fsm.c29
-rw-r--r--drivers/s390/cio/device_id.c26
-rw-r--r--drivers/s390/cio/device_ops.c4
-rw-r--r--drivers/s390/cio/device_pgid.c56
-rw-r--r--drivers/s390/cio/device_status.c14
-rw-r--r--drivers/s390/cio/ioasm.h86
-rw-r--r--drivers/s390/cio/qdio.c713
-rw-r--r--drivers/s390/cio/qdio.h144
-rw-r--r--drivers/s390/cio/schid.h26
-rw-r--r--drivers/s390/crypto/z90common.h9
-rw-r--r--drivers/s390/crypto/z90crypt.h13
-rw-r--r--drivers/s390/crypto/z90hardware.c309
-rw-r--r--drivers/s390/crypto/z90main.c112
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/claw.c6
-rw-r--r--drivers/s390/net/cu3088.c3
-rw-r--r--drivers/s390/net/iucv.c10
-rw-r--r--drivers/s390/net/qeth_main.c21
-rw-r--r--drivers/s390/s390_rdev.c53
-rw-r--r--drivers/s390/s390mach.c66
-rw-r--r--drivers/s390/sysinfo.c2
48 files changed, 2154 insertions, 986 deletions
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index c99a2fe92fb0..9803c9352d78 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += s390mach.o sysinfo.o 5obj-y += s390mach.o sysinfo.o s390_rdev.o
6obj-y += cio/ block/ char/ crypto/ net/ scsi/ 6obj-y += cio/ block/ char/ crypto/ net/ scsi/
7 7
8drivers-y += drivers/s390/built-in.o 8drivers-y += drivers/s390/built-in.o
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 6e7d7b06421d..6f50cc9323d9 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,11 @@
1if ARCH_S390 1if S390
2 2
3comment "S/390 block device drivers" 3comment "S/390 block device drivers"
4 depends on ARCH_S390 4 depends on S390
5 5
6config BLK_DEV_XPRAM 6config BLK_DEV_XPRAM
7 tristate "XPRAM disk support" 7 tristate "XPRAM disk support"
8 depends on ARCH_S390 8 depends on S390
9 help 9 help
10 Select this option if you want to use your expanded storage on S/390 10 Select this option if you want to use your expanded storage on S/390
11 or zSeries as a disk. This is useful as a _fast_ swap device if you 11 or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -49,7 +49,7 @@ config DASD_FBA
49 49
50config DASD_DIAG 50config DASD_DIAG
51 tristate "Support for DIAG access to Disks" 51 tristate "Support for DIAG access to Disks"
52 depends on DASD && ( ARCH_S390X = 'n' || EXPERIMENTAL) 52 depends on DASD && ( 64BIT = 'n' || EXPERIMENTAL)
53 help 53 help
54 Select this option if you want to use Diagnose250 command to access 54 Select this option if you want to use Diagnose250 command to access
55 Disks under VM. If you are not running under VM or unsure what it is, 55 Disks under VM. If you are not running under VM or unsure what it is,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 7008d32433bf..f779f674dfa0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.167 $ 10 * $Revision: 1.172 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -604,7 +604,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
604void 604void
605dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device) 605dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
606{ 606{
607#ifdef CONFIG_ARCH_S390X 607#ifdef CONFIG_64BIT
608 struct ccw1 *ccw; 608 struct ccw1 *ccw;
609 609
610 /* Clear any idals used for the request. */ 610 /* Clear any idals used for the request. */
@@ -1035,7 +1035,7 @@ dasd_end_request(struct request *req, int uptodate)
1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1035 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1036 BUG(); 1036 BUG();
1037 add_disk_randomness(req->rq_disk); 1037 add_disk_randomness(req->rq_disk);
1038 end_that_request_last(req); 1038 end_that_request_last(req, uptodate);
1039} 1039}
1040 1040
1041/* 1041/*
@@ -1224,6 +1224,12 @@ __dasd_start_head(struct dasd_device * device)
1224 if (list_empty(&device->ccw_queue)) 1224 if (list_empty(&device->ccw_queue))
1225 return; 1225 return;
1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1227 /* check FAILFAST */
1228 if (device->stopped & ~DASD_STOPPED_PENDING &&
1229 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
1230 cqr->status = DASD_CQR_FAILED;
1231 dasd_schedule_bh(device);
1232 }
1227 if ((cqr->status == DASD_CQR_QUEUED) && 1233 if ((cqr->status == DASD_CQR_QUEUED) &&
1228 (!device->stopped)) { 1234 (!device->stopped)) {
1229 /* try to start the first I/O that can be started */ 1235 /* try to start the first I/O that can be started */
@@ -1323,7 +1329,7 @@ void
1323dasd_schedule_bh(struct dasd_device * device) 1329dasd_schedule_bh(struct dasd_device * device)
1324{ 1330{
1325 /* Protect against rescheduling. */ 1331 /* Protect against rescheduling. */
1326 if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled)) 1332 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1327 return; 1333 return;
1328 dasd_get_device(device); 1334 dasd_get_device(device);
1329 tasklet_hi_schedule(&device->tasklet); 1335 tasklet_hi_schedule(&device->tasklet);
@@ -1750,8 +1756,10 @@ dasd_exit(void)
1750 * SECTION: common functions for ccw_driver use 1756 * SECTION: common functions for ccw_driver use
1751 */ 1757 */
1752 1758
1753/* initial attempt at a probe function. this can be simplified once 1759/*
1754 * the other detection code is gone */ 1760 * Initial attempt at a probe function. this can be simplified once
1761 * the other detection code is gone.
1762 */
1755int 1763int
1756dasd_generic_probe (struct ccw_device *cdev, 1764dasd_generic_probe (struct ccw_device *cdev,
1757 struct dasd_discipline *discipline) 1765 struct dasd_discipline *discipline)
@@ -1770,8 +1778,10 @@ dasd_generic_probe (struct ccw_device *cdev,
1770 return ret; 1778 return ret;
1771} 1779}
1772 1780
1773/* this will one day be called from a global not_oper handler. 1781/*
1774 * It is also used by driver_unregister during module unload */ 1782 * This will one day be called from a global not_oper handler.
1783 * It is also used by driver_unregister during module unload.
1784 */
1775void 1785void
1776dasd_generic_remove (struct ccw_device *cdev) 1786dasd_generic_remove (struct ccw_device *cdev)
1777{ 1787{
@@ -1798,9 +1808,11 @@ dasd_generic_remove (struct ccw_device *cdev)
1798 dasd_delete_device(device); 1808 dasd_delete_device(device);
1799} 1809}
1800 1810
1801/* activate a device. This is called from dasd_{eckd,fba}_probe() when either 1811/*
1812 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
1802 * the device is detected for the first time and is supposed to be used 1813 * the device is detected for the first time and is supposed to be used
1803 * or the user has started activation through sysfs */ 1814 * or the user has started activation through sysfs.
1815 */
1804int 1816int
1805dasd_generic_set_online (struct ccw_device *cdev, 1817dasd_generic_set_online (struct ccw_device *cdev,
1806 struct dasd_discipline *discipline) 1818 struct dasd_discipline *discipline)
@@ -1917,7 +1929,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1917 if (cqr->status == DASD_CQR_IN_IO) 1929 if (cqr->status == DASD_CQR_IN_IO)
1918 cqr->status = DASD_CQR_FAILED; 1930 cqr->status = DASD_CQR_FAILED;
1919 device->stopped |= DASD_STOPPED_DC_EIO; 1931 device->stopped |= DASD_STOPPED_DC_EIO;
1920 dasd_schedule_bh(device);
1921 } else { 1932 } else {
1922 list_for_each_entry(cqr, &device->ccw_queue, list) 1933 list_for_each_entry(cqr, &device->ccw_queue, list)
1923 if (cqr->status == DASD_CQR_IN_IO) { 1934 if (cqr->status == DASD_CQR_IN_IO) {
@@ -1927,6 +1938,7 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
1927 device->stopped |= DASD_STOPPED_DC_WAIT; 1938 device->stopped |= DASD_STOPPED_DC_WAIT;
1928 dasd_set_timer(device, 0); 1939 dasd_set_timer(device, 0);
1929 } 1940 }
1941 dasd_schedule_bh(device);
1930 ret = 1; 1942 ret = 1;
1931 break; 1943 break;
1932 case CIO_OPER: 1944 case CIO_OPER:
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index ab8754e566bc..ba80fdea7ebf 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.51 $ 9 * $Revision: 1.53 $
10 */ 10 */
11 11
12#include <linux/config.h> 12#include <linux/config.h>
@@ -25,6 +25,7 @@
25#include <asm/io.h> 25#include <asm/io.h>
26#include <asm/s390_ext.h> 26#include <asm/s390_ext.h>
27#include <asm/todclk.h> 27#include <asm/todclk.h>
28#include <asm/vtoc.h>
28 29
29#include "dasd_int.h" 30#include "dasd_int.h"
30#include "dasd_diag.h" 31#include "dasd_diag.h"
@@ -74,7 +75,7 @@ dia250(void *iob, int cmd)
74 int rc; 75 int rc;
75 76
76 __asm__ __volatile__( 77 __asm__ __volatile__(
77#ifdef CONFIG_ARCH_S390X 78#ifdef CONFIG_64BIT
78 " lghi %0,3\n" 79 " lghi %0,3\n"
79 " lgr 0,%3\n" 80 " lgr 0,%3\n"
80 " diag 0,%2,0x250\n" 81 " diag 0,%2,0x250\n"
@@ -329,7 +330,7 @@ dasd_diag_check_device(struct dasd_device *device)
329 struct dasd_diag_private *private; 330 struct dasd_diag_private *private;
330 struct dasd_diag_characteristics *rdc_data; 331 struct dasd_diag_characteristics *rdc_data;
331 struct dasd_diag_bio bio; 332 struct dasd_diag_bio bio;
332 struct dasd_diag_cms_label *label; 333 struct vtoc_cms_label *label;
333 blocknum_t end_block; 334 blocknum_t end_block;
334 unsigned int sb, bsize; 335 unsigned int sb, bsize;
335 int rc; 336 int rc;
@@ -380,7 +381,7 @@ dasd_diag_check_device(struct dasd_device *device)
380 mdsk_term_io(device); 381 mdsk_term_io(device);
381 382
382 /* figure out blocksize of device */ 383 /* figure out blocksize of device */
383 label = (struct dasd_diag_cms_label *) get_zeroed_page(GFP_KERNEL); 384 label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
384 if (label == NULL) { 385 if (label == NULL) {
385 DEV_MESSAGE(KERN_WARNING, device, "%s", 386 DEV_MESSAGE(KERN_WARNING, device, "%s",
386 "No memory to allocate initialization request"); 387 "No memory to allocate initialization request");
@@ -548,6 +549,8 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
548 } 549 }
549 cqr->retries = DIAG_MAX_RETRIES; 550 cqr->retries = DIAG_MAX_RETRIES;
550 cqr->buildclk = get_clock(); 551 cqr->buildclk = get_clock();
552 if (req->flags & REQ_FAILFAST)
553 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
551 cqr->device = device; 554 cqr->device = device;
552 cqr->expires = DIAG_TIMEOUT; 555 cqr->expires = DIAG_TIMEOUT;
553 cqr->status = DASD_CQR_FILLED; 556 cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index df31484d73a7..a4f80bd735f1 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.8 $ 9 * $Revision: 1.9 $
10 */ 10 */
11 11
12#define MDSK_WRITE_REQ 0x01 12#define MDSK_WRITE_REQ 0x01
@@ -44,29 +44,8 @@ struct dasd_diag_characteristics {
44 u8 rdev_features; 44 u8 rdev_features;
45} __attribute__ ((packed, aligned(4))); 45} __attribute__ ((packed, aligned(4)));
46 46
47struct dasd_diag_cms_label { 47
48 u8 label_id[4]; 48#ifdef CONFIG_64BIT
49 u8 vol_id[6];
50 u16 version_id;
51 u32 block_size;
52 u32 origin_ptr;
53 u32 usable_count;
54 u32 formatted_count;
55 u32 block_count;
56 u32 used_count;
57 u32 fst_size;
58 u32 fst_count;
59 u8 format_date[6];
60 u8 reserved1[2];
61 u32 disk_offset;
62 u32 map_block;
63 u32 hblk_disp;
64 u32 user_disp;
65 u8 reserved2[4];
66 u8 segment_name[8];
67} __attribute__ ((packed));
68
69#ifdef CONFIG_ARCH_S390X
70#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT 49#define DASD_DIAG_FLAGA_DEFAULT DASD_DIAG_FLAGA_FORMAT_64BIT
71 50
72typedef u64 blocknum_t; 51typedef u64 blocknum_t;
@@ -107,7 +86,7 @@ struct dasd_diag_rw_io {
107 struct dasd_diag_bio *bio_list; 86 struct dasd_diag_bio *bio_list;
108 u8 spare4[8]; 87 u8 spare4[8];
109} __attribute__ ((packed, aligned(8))); 88} __attribute__ ((packed, aligned(8)));
110#else /* CONFIG_ARCH_S390X */ 89#else /* CONFIG_64BIT */
111#define DASD_DIAG_FLAGA_DEFAULT 0x0 90#define DASD_DIAG_FLAGA_DEFAULT 0x0
112 91
113typedef u32 blocknum_t; 92typedef u32 blocknum_t;
@@ -146,4 +125,4 @@ struct dasd_diag_rw_io {
146 u32 interrupt_params; 125 u32 interrupt_params;
147 u8 spare3[20]; 126 u8 spare3[20];
148} __attribute__ ((packed, aligned(8))); 127} __attribute__ ((packed, aligned(8)));
149#endif /* CONFIG_ARCH_S390X */ 128#endif /* CONFIG_64BIT */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 811060e10c00..96eb48258580 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 * 9 *
10 * $Revision: 1.71 $ 10 * $Revision: 1.74 $
11 */ 11 */
12 12
13#include <linux/config.h> 13#include <linux/config.h>
@@ -1041,7 +1041,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1041 /* Eckd can only do full blocks. */ 1041 /* Eckd can only do full blocks. */
1042 return ERR_PTR(-EINVAL); 1042 return ERR_PTR(-EINVAL);
1043 count += bv->bv_len >> (device->s2b_shift + 9); 1043 count += bv->bv_len >> (device->s2b_shift + 9);
1044#if defined(CONFIG_ARCH_S390X) 1044#if defined(CONFIG_64BIT)
1045 if (idal_is_needed (page_address(bv->bv_page), 1045 if (idal_is_needed (page_address(bv->bv_page),
1046 bv->bv_len)) 1046 bv->bv_len))
1047 cidaw += bv->bv_len >> (device->s2b_shift + 9); 1047 cidaw += bv->bv_len >> (device->s2b_shift + 9);
@@ -1136,6 +1136,8 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1136 recid++; 1136 recid++;
1137 } 1137 }
1138 } 1138 }
1139 if (req->flags & REQ_FAILFAST)
1140 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1139 cqr->device = device; 1141 cqr->device = device;
1140 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1142 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1141 cqr->lpm = private->path_data.ppm; 1143 cqr->lpm = private->path_data.ppm;
@@ -1252,6 +1254,7 @@ dasd_eckd_release(struct block_device *bdev, int no, long args)
1252 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1254 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1253 cqr->device = device; 1255 cqr->device = device;
1254 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1256 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1257 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1255 cqr->retries = 0; 1258 cqr->retries = 0;
1256 cqr->expires = 2 * HZ; 1259 cqr->expires = 2 * HZ;
1257 cqr->buildclk = get_clock(); 1260 cqr->buildclk = get_clock();
@@ -1296,6 +1299,7 @@ dasd_eckd_reserve(struct block_device *bdev, int no, long args)
1296 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1299 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1297 cqr->device = device; 1300 cqr->device = device;
1298 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1301 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1302 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1299 cqr->retries = 0; 1303 cqr->retries = 0;
1300 cqr->expires = 2 * HZ; 1304 cqr->expires = 2 * HZ;
1301 cqr->buildclk = get_clock(); 1305 cqr->buildclk = get_clock();
@@ -1339,6 +1343,7 @@ dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
1339 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1343 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1340 cqr->device = device; 1344 cqr->device = device;
1341 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1345 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1346 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1342 cqr->retries = 0; 1347 cqr->retries = 0;
1343 cqr->expires = 2 * HZ; 1348 cqr->expires = 2 * HZ;
1344 cqr->buildclk = get_clock(); 1349 cqr->buildclk = get_clock();
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 28cb4613b7f5..8ec75dc08e2c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -4,7 +4,7 @@
4 * Bugreports.to..: <Linux390@de.ibm.com> 4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 * 6 *
7 * $Revision: 1.40 $ 7 * $Revision: 1.41 $
8 */ 8 */
9 9
10#include <linux/config.h> 10#include <linux/config.h>
@@ -271,7 +271,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
271 /* Fba can only do full blocks. */ 271 /* Fba can only do full blocks. */
272 return ERR_PTR(-EINVAL); 272 return ERR_PTR(-EINVAL);
273 count += bv->bv_len >> (device->s2b_shift + 9); 273 count += bv->bv_len >> (device->s2b_shift + 9);
274#if defined(CONFIG_ARCH_S390X) 274#if defined(CONFIG_64BIT)
275 if (idal_is_needed (page_address(bv->bv_page), 275 if (idal_is_needed (page_address(bv->bv_page),
276 bv->bv_len)) 276 bv->bv_len))
277 cidaw += bv->bv_len / blksize; 277 cidaw += bv->bv_len / blksize;
@@ -352,6 +352,8 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
352 recid++; 352 recid++;
353 } 353 }
354 } 354 }
355 if (req->flags & REQ_FAILFAST)
356 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
355 cqr->device = device; 357 cqr->device = device;
356 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 358 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
357 cqr->retries = 32; 359 cqr->retries = 32;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 9fab04f3056d..2fb05c4a528c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -6,7 +6,7 @@
6 * Bugreports.to..: <Linux390@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 * 8 *
9 * $Revision: 1.65 $ 9 * $Revision: 1.68 $
10 */ 10 */
11 11
12#ifndef DASD_INT_H 12#ifndef DASD_INT_H
@@ -208,6 +208,7 @@ struct dasd_ccw_req {
208 208
209/* per dasd_ccw_req flags */ 209/* per dasd_ccw_req flags */
210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
211#define DASD_CQR_FLAGS_FAILFAST 1 /* FAILFAST */
211 212
212/* Signature for error recovery functions. */ 213/* Signature for error recovery functions. */
213typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *); 214typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 789595b3fa09..044b75371990 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -7,7 +7,7 @@
7 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 * 9 *
10 * $Revision: 1.47 $ 10 * $Revision: 1.50 $
11 * 11 *
12 * i/o controls for the dasd driver. 12 * i/o controls for the dasd driver.
13 */ 13 */
@@ -352,6 +352,9 @@ dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
352 if (device == NULL) 352 if (device == NULL)
353 return -ENODEV; 353 return -ENODEV;
354 354
355 if (dasd_profile_level == DASD_PROFILE_OFF)
356 return -EIO;
357
355 if (copy_to_user((long __user *) args, (long *) &device->profile, 358 if (copy_to_user((long __user *) args, (long *) &device->profile,
356 sizeof (struct dasd_profile_info_t))) 359 sizeof (struct dasd_profile_info_t)))
357 return -EFAULT; 360 return -EFAULT;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4fde41188996..2e727f49ad19 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -15,7 +15,7 @@
15#include <asm/io.h> 15#include <asm/io.h>
16#include <linux/completion.h> 16#include <linux/completion.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <asm/ccwdev.h> // for s390_root_dev_(un)register() 18#include <asm/s390_rdev.h>
19 19
20//#define DCSSBLK_DEBUG /* Debug messages on/off */ 20//#define DCSSBLK_DEBUG /* Debug messages on/off */
21#define DCSSBLK_NAME "dcssblk" 21#define DCSSBLK_NAME "dcssblk"
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index d428c909b8a0..bf3a67c3cc5e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -160,7 +160,7 @@ static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
160 "0: ipm %0\n" 160 "0: ipm %0\n"
161 " srl %0,28\n" 161 " srl %0,28\n"
162 "1:\n" 162 "1:\n"
163#ifndef CONFIG_ARCH_S390X 163#ifndef CONFIG_64BIT
164 ".section __ex_table,\"a\"\n" 164 ".section __ex_table,\"a\"\n"
165 " .align 4\n" 165 " .align 4\n"
166 " .long 0b,1b\n" 166 " .long 0b,1b\n"
@@ -208,7 +208,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
208 "0: ipm %0\n" 208 "0: ipm %0\n"
209 " srl %0,28\n" 209 " srl %0,28\n"
210 "1:\n" 210 "1:\n"
211#ifndef CONFIG_ARCH_S390X 211#ifndef CONFIG_64BIT
212 ".section __ex_table,\"a\"\n" 212 ".section __ex_table,\"a\"\n"
213 " .align 4\n" 213 " .align 4\n"
214 " .long 0b,1b\n" 214 " .long 0b,1b\n"
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5a6cef2dfa13..80f7f31310e6 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -204,7 +204,7 @@ cpi_module_init(void)
204 printk(KERN_WARNING "cpi: no control program identification " 204 printk(KERN_WARNING "cpi: no control program identification "
205 "support\n"); 205 "support\n");
206 sclp_unregister(&sclp_cpi_event); 206 sclp_unregister(&sclp_cpi_event);
207 return -ENOTSUPP; 207 return -EOPNOTSUPP;
208 } 208 }
209 209
210 req = cpi_prepare_req(); 210 req = cpi_prepare_req();
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 83f75774df60..56fa69168898 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -32,7 +32,7 @@ do_load_quiesce_psw(void * __unused)
32 psw_t quiesce_psw; 32 psw_t quiesce_psw;
33 int cpu; 33 int cpu;
34 34
35 if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid)) 35 if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
36 signal_processor(smp_processor_id(), sigp_stop); 36 signal_processor(smp_processor_id(), sigp_stop);
37 /* Wait for all other cpus to enter stopped state */ 37 /* Wait for all other cpus to enter stopped state */
38 for_each_online_cpu(cpu) { 38 for_each_online_cpu(cpu) {
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 1efc9f21229e..5ced2725d6c7 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -65,7 +65,7 @@ static void
65tapeblock_trigger_requeue(struct tape_device *device) 65tapeblock_trigger_requeue(struct tape_device *device)
66{ 66{
67 /* Protect against rescheduling. */ 67 /* Protect against rescheduling. */
68 if (atomic_compare_and_swap(0, 1, &device->blk_data.requeue_scheduled)) 68 if (atomic_cmpxchg(&device->blk_data.requeue_scheduled, 0, 1) != 0)
69 return; 69 return;
70 schedule_work(&device->blk_data.requeue_task); 70 schedule_work(&device->blk_data.requeue_task);
71} 71}
@@ -78,7 +78,7 @@ tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
80 BUG(); 80 BUG();
81 end_that_request_last(req); 81 end_that_request_last(req, uptodate);
82} 82}
83 83
84static void 84static void
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 5473c23fcb52..5acc0ace3d7d 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -66,7 +66,7 @@ static int __diag288(enum vmwdt_func func, unsigned int timeout,
66 __cmdl = len; 66 __cmdl = len;
67 err = 0; 67 err = 0;
68 asm volatile ( 68 asm volatile (
69#ifdef __s390x__ 69#ifdef CONFIG_64BIT
70 "diag %2,%4,0x288\n" 70 "diag %2,%4,0x288\n"
71 "1: \n" 71 "1: \n"
72 ".section .fixup,\"ax\"\n" 72 ".section .fixup,\"ax\"\n"
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a1c52a682191..daf21e03b21d 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/blacklist.c 2 * drivers/s390/cio/blacklist.c
3 * S/390 common I/O routines -- blacklisting of specific devices 3 * S/390 common I/O routines -- blacklisting of specific devices
4 * $Revision: 1.35 $ 4 * $Revision: 1.39 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -15,6 +15,7 @@
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/seq_file.h>
18#include <linux/ctype.h> 19#include <linux/ctype.h>
19#include <linux/device.h> 20#include <linux/device.h>
20 21
@@ -34,10 +35,10 @@
34 * These can be single devices or ranges of devices 35 * These can be single devices or ranges of devices
35 */ 36 */
36 37
37/* 65536 bits to indicate if a devno is blacklisted or not */ 38/* 65536 bits for each set to indicate if a devno is blacklisted or not */
38#define __BL_DEV_WORDS ((__MAX_SUBCHANNELS + (8*sizeof(long) - 1)) / \ 39#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
39 (8*sizeof(long))) 40 (8*sizeof(long)))
40static unsigned long bl_dev[__BL_DEV_WORDS]; 41static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
41typedef enum {add, free} range_action; 42typedef enum {add, free} range_action;
42 43
43/* 44/*
@@ -45,21 +46,23 @@ typedef enum {add, free} range_action;
45 * (Un-)blacklist the devices from-to 46 * (Un-)blacklist the devices from-to
46 */ 47 */
47static inline void 48static inline void
48blacklist_range (range_action action, unsigned int from, unsigned int to) 49blacklist_range (range_action action, unsigned int from, unsigned int to,
50 unsigned int ssid)
49{ 51{
50 if (!to) 52 if (!to)
51 to = from; 53 to = from;
52 54
53 if (from > to || to > __MAX_SUBCHANNELS) { 55 if (from > to || to > __MAX_SUBCHANNEL || ssid > __MAX_SSID) {
54 printk (KERN_WARNING "Invalid blacklist range " 56 printk (KERN_WARNING "Invalid blacklist range "
55 "0x%04x to 0x%04x, skipping\n", from, to); 57 "0.%x.%04x to 0.%x.%04x, skipping\n",
58 ssid, from, ssid, to);
56 return; 59 return;
57 } 60 }
58 for (; from <= to; from++) { 61 for (; from <= to; from++) {
59 if (action == add) 62 if (action == add)
60 set_bit (from, bl_dev); 63 set_bit (from, bl_dev[ssid]);
61 else 64 else
62 clear_bit (from, bl_dev); 65 clear_bit (from, bl_dev[ssid]);
63 } 66 }
64} 67}
65 68
@@ -69,7 +72,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to)
69 * Shamelessly grabbed from dasd_devmap.c. 72 * Shamelessly grabbed from dasd_devmap.c.
70 */ 73 */
71static inline int 74static inline int
72blacklist_busid(char **str, int *id0, int *id1, int *devno) 75blacklist_busid(char **str, int *id0, int *ssid, int *devno)
73{ 76{
74 int val, old_style; 77 int val, old_style;
75 char *sav; 78 char *sav;
@@ -86,7 +89,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
86 goto confused; 89 goto confused;
87 val = simple_strtoul(*str, str, 16); 90 val = simple_strtoul(*str, str, 16);
88 if (old_style || (*str)[0] != '.') { 91 if (old_style || (*str)[0] != '.') {
89 *id0 = *id1 = 0; 92 *id0 = *ssid = 0;
90 if (val < 0 || val > 0xffff) 93 if (val < 0 || val > 0xffff)
91 goto confused; 94 goto confused;
92 *devno = val; 95 *devno = val;
@@ -105,7 +108,7 @@ blacklist_busid(char **str, int *id0, int *id1, int *devno)
105 val = simple_strtoul(*str, str, 16); 108 val = simple_strtoul(*str, str, 16);
106 if (val < 0 || val > 0xff || (*str)++[0] != '.') 109 if (val < 0 || val > 0xff || (*str)++[0] != '.')
107 goto confused; 110 goto confused;
108 *id1 = val; 111 *ssid = val;
109 if (!isxdigit((*str)[0])) /* We require at least one hex digit */ 112 if (!isxdigit((*str)[0])) /* We require at least one hex digit */
110 goto confused; 113 goto confused;
111 val = simple_strtoul(*str, str, 16); 114 val = simple_strtoul(*str, str, 16);
@@ -125,7 +128,7 @@ confused:
125static inline int 128static inline int
126blacklist_parse_parameters (char *str, range_action action) 129blacklist_parse_parameters (char *str, range_action action)
127{ 130{
128 unsigned int from, to, from_id0, to_id0, from_id1, to_id1; 131 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid;
129 132
130 while (*str != 0 && *str != '\n') { 133 while (*str != 0 && *str != '\n') {
131 range_action ra = action; 134 range_action ra = action;
@@ -142,23 +145,25 @@ blacklist_parse_parameters (char *str, range_action action)
142 */ 145 */
143 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || 146 if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 ||
144 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { 147 strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) {
145 from = 0; 148 int j;
146 to = __MAX_SUBCHANNELS; 149
147 str += 3; 150 str += 3;
151 for (j=0; j <= __MAX_SSID; j++)
152 blacklist_range(ra, 0, __MAX_SUBCHANNEL, j);
148 } else { 153 } else {
149 int rc; 154 int rc;
150 155
151 rc = blacklist_busid(&str, &from_id0, 156 rc = blacklist_busid(&str, &from_id0,
152 &from_id1, &from); 157 &from_ssid, &from);
153 if (rc) 158 if (rc)
154 continue; 159 continue;
155 to = from; 160 to = from;
156 to_id0 = from_id0; 161 to_id0 = from_id0;
157 to_id1 = from_id1; 162 to_ssid = from_ssid;
158 if (*str == '-') { 163 if (*str == '-') {
159 str++; 164 str++;
160 rc = blacklist_busid(&str, &to_id0, 165 rc = blacklist_busid(&str, &to_id0,
161 &to_id1, &to); 166 &to_ssid, &to);
162 if (rc) 167 if (rc)
163 continue; 168 continue;
164 } 169 }
@@ -168,18 +173,19 @@ blacklist_parse_parameters (char *str, range_action action)
168 strsep(&str, ",\n")); 173 strsep(&str, ",\n"));
169 continue; 174 continue;
170 } 175 }
171 if ((from_id0 != to_id0) || (from_id1 != to_id1)) { 176 if ((from_id0 != to_id0) ||
177 (from_ssid != to_ssid)) {
172 printk(KERN_WARNING "invalid cio_ignore range " 178 printk(KERN_WARNING "invalid cio_ignore range "
173 "%x.%x.%04x-%x.%x.%04x\n", 179 "%x.%x.%04x-%x.%x.%04x\n",
174 from_id0, from_id1, from, 180 from_id0, from_ssid, from,
175 to_id0, to_id1, to); 181 to_id0, to_ssid, to);
176 continue; 182 continue;
177 } 183 }
184 pr_debug("blacklist_setup: adding range "
185 "from %x.%x.%04x to %x.%x.%04x\n",
186 from_id0, from_ssid, from, to_id0, to_ssid, to);
187 blacklist_range (ra, from, to, to_ssid);
178 } 188 }
179 /* FIXME: ignoring id0 and id1 here. */
180 pr_debug("blacklist_setup: adding range "
181 "from 0.0.%04x to 0.0.%04x\n", from, to);
182 blacklist_range (ra, from, to);
183 } 189 }
184 return 1; 190 return 1;
185} 191}
@@ -213,12 +219,33 @@ __setup ("cio_ignore=", blacklist_setup);
213 * Used by validate_subchannel() 219 * Used by validate_subchannel()
214 */ 220 */
215int 221int
216is_blacklisted (int devno) 222is_blacklisted (int ssid, int devno)
217{ 223{
218 return test_bit (devno, bl_dev); 224 return test_bit (devno, bl_dev[ssid]);
219} 225}
220 226
221#ifdef CONFIG_PROC_FS 227#ifdef CONFIG_PROC_FS
228static int
229__s390_redo_validation(struct subchannel_id schid, void *data)
230{
231 int ret;
232 struct subchannel *sch;
233
234 sch = get_subchannel_by_schid(schid);
235 if (sch) {
236 /* Already known. */
237 put_device(&sch->dev);
238 return 0;
239 }
240 ret = css_probe_device(schid);
241 if (ret == -ENXIO)
242 return ret; /* We're through. */
243 if (ret == -ENOMEM)
244 /* Stop validation for now. Bad, but no need for a panic. */
245 return ret;
246 return 0;
247}
248
222/* 249/*
223 * Function: s390_redo_validation 250 * Function: s390_redo_validation
224 * Look for no longer blacklisted devices 251 * Look for no longer blacklisted devices
@@ -226,29 +253,9 @@ is_blacklisted (int devno)
226static inline void 253static inline void
227s390_redo_validation (void) 254s390_redo_validation (void)
228{ 255{
229 unsigned int irq;
230
231 CIO_TRACE_EVENT (0, "redoval"); 256 CIO_TRACE_EVENT (0, "redoval");
232 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 257
233 int ret; 258 for_each_subchannel(__s390_redo_validation, NULL);
234 struct subchannel *sch;
235
236 sch = get_subchannel_by_schid(irq);
237 if (sch) {
238 /* Already known. */
239 put_device(&sch->dev);
240 continue;
241 }
242 ret = css_probe_device(irq);
243 if (ret == -ENXIO)
244 break; /* We're through. */
245 if (ret == -ENOMEM)
246 /*
247 * Stop validation for now. Bad, but no need for a
248 * panic.
249 */
250 break;
251 }
252} 259}
253 260
254/* 261/*
@@ -278,41 +285,90 @@ blacklist_parse_proc_parameters (char *buf)
278 s390_redo_validation (); 285 s390_redo_validation ();
279} 286}
280 287
281/* FIXME: These should be real bus ids and not home-grown ones! */ 288/* Iterator struct for all devices. */
282static int cio_ignore_read (char *page, char **start, off_t off, 289struct ccwdev_iter {
283 int count, int *eof, void *data) 290 int devno;
291 int ssid;
292 int in_range;
293};
294
295static void *
296cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
284{ 297{
285 const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ 298 struct ccwdev_iter *iter;
286 long devno; 299
287 int len; 300 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
288 301 return NULL;
289 len = 0; 302 iter = kzalloc(sizeof(struct ccwdev_iter), GFP_KERNEL);
290 for (devno = off; /* abuse the page variable 303 if (!iter)
291 * as counter, see fs/proc/generic.c */ 304 return ERR_PTR(-ENOMEM);
292 devno < __MAX_SUBCHANNELS && len + entry_size < count; devno++) { 305 iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
293 if (!test_bit(devno, bl_dev)) 306 iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
294 continue; 307 return iter;
295 len += sprintf(page + len, "0.0.%04lx", devno); 308}
296 if (test_bit(devno + 1, bl_dev)) { /* print range */ 309
297 while (++devno < __MAX_SUBCHANNELS) 310static void
298 if (!test_bit(devno, bl_dev)) 311cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
299 break; 312{
300 len += sprintf(page + len, "-0.0.%04lx", --devno); 313 if (!IS_ERR(it))
301 } 314 kfree(it);
302 len += sprintf(page + len, "\n"); 315}
303 } 316
317static void *
318cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
319{
320 struct ccwdev_iter *iter;
321
322 if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
323 return NULL;
324 iter = it;
325 if (iter->devno == __MAX_SUBCHANNEL) {
326 iter->devno = 0;
327 iter->ssid++;
328 if (iter->ssid > __MAX_SSID)
329 return NULL;
330 } else
331 iter->devno++;
332 (*offset)++;
333 return iter;
334}
304 335
305 if (devno < __MAX_SUBCHANNELS) 336static int
306 *eof = 1; 337cio_ignore_proc_seq_show(struct seq_file *s, void *it)
307 *start = (char *) (devno - off); /* number of checked entries */ 338{
308 return len; 339 struct ccwdev_iter *iter;
340
341 iter = it;
342 if (!is_blacklisted(iter->ssid, iter->devno))
343 /* Not blacklisted, nothing to output. */
344 return 0;
345 if (!iter->in_range) {
346 /* First device in range. */
347 if ((iter->devno == __MAX_SUBCHANNEL) ||
348 !is_blacklisted(iter->ssid, iter->devno + 1))
349 /* Singular device. */
350 return seq_printf(s, "0.%x.%04x\n",
351 iter->ssid, iter->devno);
352 iter->in_range = 1;
353 return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
354 }
355 if ((iter->devno == __MAX_SUBCHANNEL) ||
356 !is_blacklisted(iter->ssid, iter->devno + 1)) {
357 /* Last device in range. */
358 iter->in_range = 0;
359 return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
360 }
361 return 0;
309} 362}
310 363
311static int cio_ignore_write(struct file *file, const char __user *user_buf, 364static ssize_t
312 unsigned long user_len, void *data) 365cio_ignore_write(struct file *file, const char __user *user_buf,
366 size_t user_len, loff_t *offset)
313{ 367{
314 char *buf; 368 char *buf;
315 369
370 if (*offset)
371 return -EINVAL;
316 if (user_len > 65536) 372 if (user_len > 65536)
317 user_len = 65536; 373 user_len = 65536;
318 buf = vmalloc (user_len + 1); /* maybe better use the stack? */ 374 buf = vmalloc (user_len + 1); /* maybe better use the stack? */
@@ -330,6 +386,27 @@ static int cio_ignore_write(struct file *file, const char __user *user_buf,
330 return user_len; 386 return user_len;
331} 387}
332 388
389static struct seq_operations cio_ignore_proc_seq_ops = {
390 .start = cio_ignore_proc_seq_start,
391 .stop = cio_ignore_proc_seq_stop,
392 .next = cio_ignore_proc_seq_next,
393 .show = cio_ignore_proc_seq_show,
394};
395
396static int
397cio_ignore_proc_open(struct inode *inode, struct file *file)
398{
399 return seq_open(file, &cio_ignore_proc_seq_ops);
400}
401
402static struct file_operations cio_ignore_proc_fops = {
403 .open = cio_ignore_proc_open,
404 .read = seq_read,
405 .llseek = seq_lseek,
406 .release = seq_release,
407 .write = cio_ignore_write,
408};
409
333static int 410static int
334cio_ignore_proc_init (void) 411cio_ignore_proc_init (void)
335{ 412{
@@ -340,8 +417,7 @@ cio_ignore_proc_init (void)
340 if (!entry) 417 if (!entry)
341 return 0; 418 return 0;
342 419
343 entry->read_proc = cio_ignore_read; 420 entry->proc_fops = &cio_ignore_proc_fops;
344 entry->write_proc = cio_ignore_write;
345 421
346 return 1; 422 return 1;
347} 423}
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h
index fb42cafbe57c..95e25c1df922 100644
--- a/drivers/s390/cio/blacklist.h
+++ b/drivers/s390/cio/blacklist.h
@@ -1,6 +1,6 @@
1#ifndef S390_BLACKLIST_H 1#ifndef S390_BLACKLIST_H
2#define S390_BLACKLIST_H 2#define S390_BLACKLIST_H
3 3
4extern int is_blacklisted (int devno); 4extern int is_blacklisted (int ssid, int devno);
5 5
6#endif 6#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index e7bd7f37f080..e849289d4f3c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/ccwgroup.c 2 * drivers/s390/cio/ccwgroup.c
3 * bus driver for ccwgroup 3 * bus driver for ccwgroup
4 * $Revision: 1.32 $ 4 * $Revision: 1.33 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -45,7 +45,7 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
45 return 0; 45 return 0;
46} 46}
47static int 47static int
48ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer, 48ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
49 int buffer_size) 49 int buffer_size)
50{ 50{
51 /* TODO */ 51 /* TODO */
@@ -55,7 +55,7 @@ ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer,
55static struct bus_type ccwgroup_bus_type = { 55static struct bus_type ccwgroup_bus_type = {
56 .name = "ccwgroup", 56 .name = "ccwgroup",
57 .match = ccwgroup_bus_match, 57 .match = ccwgroup_bus_match,
58 .hotplug = ccwgroup_hotplug, 58 .uevent = ccwgroup_uevent,
59}; 59};
60 60
61static inline void 61static inline void
@@ -263,7 +263,7 @@ ccwgroup_set_online(struct ccwgroup_device *gdev)
263 struct ccwgroup_driver *gdrv; 263 struct ccwgroup_driver *gdrv;
264 int ret; 264 int ret;
265 265
266 if (atomic_compare_and_swap(0, 1, &gdev->onoff)) 266 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
267 return -EAGAIN; 267 return -EAGAIN;
268 if (gdev->state == CCWGROUP_ONLINE) { 268 if (gdev->state == CCWGROUP_ONLINE) {
269 ret = 0; 269 ret = 0;
@@ -289,7 +289,7 @@ ccwgroup_set_offline(struct ccwgroup_device *gdev)
289 struct ccwgroup_driver *gdrv; 289 struct ccwgroup_driver *gdrv;
290 int ret; 290 int ret;
291 291
292 if (atomic_compare_and_swap(0, 1, &gdev->onoff)) 292 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
293 return -EAGAIN; 293 return -EAGAIN;
294 if (gdev->state == CCWGROUP_OFFLINE) { 294 if (gdev->state == CCWGROUP_OFFLINE) {
295 ret = 0; 295 ret = 0;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index fa3c23b80e3a..7270808c02d1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/chsc.c 2 * drivers/s390/cio/chsc.c
3 * S/390 common I/O routines -- channel subsystem call 3 * S/390 common I/O routines -- channel subsystem call
4 * $Revision: 1.120 $ 4 * $Revision: 1.126 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -24,8 +24,6 @@
24#include "ioasm.h" 24#include "ioasm.h"
25#include "chsc.h" 25#include "chsc.h"
26 26
27static struct channel_path *chps[NR_CHPIDS];
28
29static void *sei_page; 27static void *sei_page;
30 28
31static int new_channel_path(int chpid); 29static int new_channel_path(int chpid);
@@ -33,13 +31,13 @@ static int new_channel_path(int chpid);
33static inline void 31static inline void
34set_chp_logically_online(int chp, int onoff) 32set_chp_logically_online(int chp, int onoff)
35{ 33{
36 chps[chp]->state = onoff; 34 css[0]->chps[chp]->state = onoff;
37} 35}
38 36
39static int 37static int
40get_chp_status(int chp) 38get_chp_status(int chp)
41{ 39{
42 return (chps[chp] ? chps[chp]->state : -ENODEV); 40 return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
43} 41}
44 42
45void 43void
@@ -77,7 +75,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
77 75
78 struct { 76 struct {
79 struct chsc_header request; 77 struct chsc_header request;
80 u16 reserved1; 78 u16 reserved1a:10;
79 u16 ssid:2;
80 u16 reserved1b:4;
81 u16 f_sch; /* first subchannel */ 81 u16 f_sch; /* first subchannel */
82 u16 reserved2; 82 u16 reserved2;
83 u16 l_sch; /* last subchannel */ 83 u16 l_sch; /* last subchannel */
@@ -104,8 +104,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
104 .code = 0x0004, 104 .code = 0x0004,
105 }; 105 };
106 106
107 ssd_area->f_sch = sch->irq; 107 ssd_area->ssid = sch->schid.ssid;
108 ssd_area->l_sch = sch->irq; 108 ssd_area->f_sch = sch->schid.sch_no;
109 ssd_area->l_sch = sch->schid.sch_no;
109 110
110 ccode = chsc(ssd_area); 111 ccode = chsc(ssd_area);
111 if (ccode > 0) { 112 if (ccode > 0) {
@@ -147,7 +148,8 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
147 */ 148 */
148 if (ssd_area->st > 3) { /* uhm, that looks strange... */ 149 if (ssd_area->st > 3) { /* uhm, that looks strange... */
149 CIO_CRW_EVENT(0, "Strange subchannel type %d" 150 CIO_CRW_EVENT(0, "Strange subchannel type %d"
150 " for sch %04x\n", ssd_area->st, sch->irq); 151 " for sch 0.%x.%04x\n", ssd_area->st,
152 sch->schid.ssid, sch->schid.sch_no);
151 /* 153 /*
152 * There may have been a new subchannel type defined in the 154 * There may have been a new subchannel type defined in the
153 * time since this code was written; since we don't know which 155 * time since this code was written; since we don't know which
@@ -156,8 +158,9 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
156 return 0; 158 return 0;
157 } else { 159 } else {
158 const char *type[4] = {"I/O", "chsc", "message", "ADM"}; 160 const char *type[4] = {"I/O", "chsc", "message", "ADM"};
159 CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n", 161 CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
160 sch->irq, type[ssd_area->st]); 162 sch->schid.ssid, sch->schid.sch_no,
163 type[ssd_area->st]);
161 164
162 sch->ssd_info.valid = 1; 165 sch->ssd_info.valid = 1;
163 sch->ssd_info.type = ssd_area->st; 166 sch->ssd_info.type = ssd_area->st;
@@ -218,13 +221,13 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
218 int j; 221 int j;
219 int mask; 222 int mask;
220 struct subchannel *sch; 223 struct subchannel *sch;
221 __u8 *chpid; 224 struct channel_path *chpid;
222 struct schib schib; 225 struct schib schib;
223 226
224 sch = to_subchannel(dev); 227 sch = to_subchannel(dev);
225 chpid = data; 228 chpid = data;
226 for (j = 0; j < 8; j++) 229 for (j = 0; j < 8; j++)
227 if (sch->schib.pmcw.chpid[j] == *chpid) 230 if (sch->schib.pmcw.chpid[j] == chpid->id)
228 break; 231 break;
229 if (j >= 8) 232 if (j >= 8)
230 return 0; 233 return 0;
@@ -232,7 +235,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
232 mask = 0x80 >> j; 235 mask = 0x80 >> j;
233 spin_lock(&sch->lock); 236 spin_lock(&sch->lock);
234 237
235 stsch(sch->irq, &schib); 238 stsch(sch->schid, &schib);
236 if (!schib.pmcw.dnv) 239 if (!schib.pmcw.dnv)
237 goto out_unreg; 240 goto out_unreg;
238 memcpy(&sch->schib, &schib, sizeof(struct schib)); 241 memcpy(&sch->schib, &schib, sizeof(struct schib));
@@ -284,7 +287,7 @@ out_unlock:
284out_unreg: 287out_unreg:
285 spin_unlock(&sch->lock); 288 spin_unlock(&sch->lock);
286 sch->lpm = 0; 289 sch->lpm = 0;
287 if (css_enqueue_subchannel_slow(sch->irq)) { 290 if (css_enqueue_subchannel_slow(sch->schid)) {
288 css_clear_subchannel_slow_list(); 291 css_clear_subchannel_slow_list();
289 need_rescan = 1; 292 need_rescan = 1;
290 } 293 }
@@ -295,23 +298,30 @@ static inline void
295s390_set_chpid_offline( __u8 chpid) 298s390_set_chpid_offline( __u8 chpid)
296{ 299{
297 char dbf_txt[15]; 300 char dbf_txt[15];
301 struct device *dev;
298 302
299 sprintf(dbf_txt, "chpr%x", chpid); 303 sprintf(dbf_txt, "chpr%x", chpid);
300 CIO_TRACE_EVENT(2, dbf_txt); 304 CIO_TRACE_EVENT(2, dbf_txt);
301 305
302 if (get_chp_status(chpid) <= 0) 306 if (get_chp_status(chpid) <= 0)
303 return; 307 return;
304 308 dev = get_device(&css[0]->chps[chpid]->dev);
305 bus_for_each_dev(&css_bus_type, NULL, &chpid, 309 bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
306 s390_subchannel_remove_chpid); 310 s390_subchannel_remove_chpid);
307 311
308 if (need_rescan || css_slow_subchannels_exist()) 312 if (need_rescan || css_slow_subchannels_exist())
309 queue_work(slow_path_wq, &slow_path_work); 313 queue_work(slow_path_wq, &slow_path_work);
314 put_device(dev);
310} 315}
311 316
317struct res_acc_data {
318 struct channel_path *chp;
319 u32 fla_mask;
320 u16 fla;
321};
322
312static int 323static int
313s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, 324s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
314 struct subchannel *sch)
315{ 325{
316 int found; 326 int found;
317 int chp; 327 int chp;
@@ -323,8 +333,9 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
323 * check if chpid is in information updated by ssd 333 * check if chpid is in information updated by ssd
324 */ 334 */
325 if (sch->ssd_info.valid && 335 if (sch->ssd_info.valid &&
326 sch->ssd_info.chpid[chp] == chpid && 336 sch->ssd_info.chpid[chp] == res_data->chp->id &&
327 (sch->ssd_info.fla[chp] & fla_mask) == fla) { 337 (sch->ssd_info.fla[chp] & res_data->fla_mask)
338 == res_data->fla) {
328 found = 1; 339 found = 1;
329 break; 340 break;
330 } 341 }
@@ -337,24 +348,87 @@ s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask,
337 * new path information and eventually check for logically 348 * new path information and eventually check for logically
338 * offline chpids. 349 * offline chpids.
339 */ 350 */
340 ccode = stsch(sch->irq, &sch->schib); 351 ccode = stsch(sch->schid, &sch->schib);
341 if (ccode > 0) 352 if (ccode > 0)
342 return 0; 353 return 0;
343 354
344 return 0x80 >> chp; 355 return 0x80 >> chp;
345} 356}
346 357
358static inline int
359s390_process_res_acc_new_sch(struct subchannel_id schid)
360{
361 struct schib schib;
362 int ret;
363 /*
364 * We don't know the device yet, but since a path
365 * may be available now to the device we'll have
366 * to do recognition again.
367 * Since we don't have any idea about which chpid
368 * that beast may be on we'll have to do a stsch
369 * on all devices, grr...
370 */
371 if (stsch_err(schid, &schib))
372 /* We're through */
373 return need_rescan ? -EAGAIN : -ENXIO;
374
375 /* Put it on the slow path. */
376 ret = css_enqueue_subchannel_slow(schid);
377 if (ret) {
378 css_clear_subchannel_slow_list();
379 need_rescan = 1;
380 return -EAGAIN;
381 }
382 return 0;
383}
384
347static int 385static int
348s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask) 386__s390_process_res_acc(struct subchannel_id schid, void *data)
349{ 387{
388 int chp_mask, old_lpm;
389 struct res_acc_data *res_data;
350 struct subchannel *sch; 390 struct subchannel *sch;
351 int irq, rc; 391
392 res_data = (struct res_acc_data *)data;
393 sch = get_subchannel_by_schid(schid);
394 if (!sch)
395 /* Check if a subchannel is newly available. */
396 return s390_process_res_acc_new_sch(schid);
397
398 spin_lock_irq(&sch->lock);
399
400 chp_mask = s390_process_res_acc_sch(res_data, sch);
401
402 if (chp_mask == 0) {
403 spin_unlock_irq(&sch->lock);
404 return 0;
405 }
406 old_lpm = sch->lpm;
407 sch->lpm = ((sch->schib.pmcw.pim &
408 sch->schib.pmcw.pam &
409 sch->schib.pmcw.pom)
410 | chp_mask) & sch->opm;
411 if (!old_lpm && sch->lpm)
412 device_trigger_reprobe(sch);
413 else if (sch->driver && sch->driver->verify)
414 sch->driver->verify(&sch->dev);
415
416 spin_unlock_irq(&sch->lock);
417 put_device(&sch->dev);
418 return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
419}
420
421
422static int
423s390_process_res_acc (struct res_acc_data *res_data)
424{
425 int rc;
352 char dbf_txt[15]; 426 char dbf_txt[15];
353 427
354 sprintf(dbf_txt, "accpr%x", chpid); 428 sprintf(dbf_txt, "accpr%x", res_data->chp->id);
355 CIO_TRACE_EVENT( 2, dbf_txt); 429 CIO_TRACE_EVENT( 2, dbf_txt);
356 if (fla != 0) { 430 if (res_data->fla != 0) {
357 sprintf(dbf_txt, "fla%x", fla); 431 sprintf(dbf_txt, "fla%x", res_data->fla);
358 CIO_TRACE_EVENT( 2, dbf_txt); 432 CIO_TRACE_EVENT( 2, dbf_txt);
359 } 433 }
360 434
@@ -365,70 +439,11 @@ s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
365 * The more information we have (info), the less scanning 439 * The more information we have (info), the less scanning
366 * will we have to do. 440 * will we have to do.
367 */ 441 */
368 442 rc = for_each_subchannel(__s390_process_res_acc, res_data);
369 if (!get_chp_status(chpid)) 443 if (css_slow_subchannels_exist())
370 return 0; /* no need to do the rest */ 444 rc = -EAGAIN;
371 445 else if (rc != -EAGAIN)
372 rc = 0; 446 rc = 0;
373 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
374 int chp_mask, old_lpm;
375
376 sch = get_subchannel_by_schid(irq);
377 if (!sch) {
378 struct schib schib;
379 int ret;
380 /*
381 * We don't know the device yet, but since a path
382 * may be available now to the device we'll have
383 * to do recognition again.
384 * Since we don't have any idea about which chpid
385 * that beast may be on we'll have to do a stsch
386 * on all devices, grr...
387 */
388 if (stsch(irq, &schib)) {
389 /* We're through */
390 if (need_rescan)
391 rc = -EAGAIN;
392 break;
393 }
394 if (need_rescan) {
395 rc = -EAGAIN;
396 continue;
397 }
398 /* Put it on the slow path. */
399 ret = css_enqueue_subchannel_slow(irq);
400 if (ret) {
401 css_clear_subchannel_slow_list();
402 need_rescan = 1;
403 }
404 rc = -EAGAIN;
405 continue;
406 }
407
408 spin_lock_irq(&sch->lock);
409
410 chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);
411
412 if (chp_mask == 0) {
413
414 spin_unlock_irq(&sch->lock);
415 continue;
416 }
417 old_lpm = sch->lpm;
418 sch->lpm = ((sch->schib.pmcw.pim &
419 sch->schib.pmcw.pam &
420 sch->schib.pmcw.pom)
421 | chp_mask) & sch->opm;
422 if (!old_lpm && sch->lpm)
423 device_trigger_reprobe(sch);
424 else if (sch->driver && sch->driver->verify)
425 sch->driver->verify(&sch->dev);
426
427 spin_unlock_irq(&sch->lock);
428 put_device(&sch->dev);
429 if (fla_mask == 0xffff)
430 break;
431 }
432 return rc; 447 return rc;
433} 448}
434 449
@@ -466,6 +481,7 @@ int
466chsc_process_crw(void) 481chsc_process_crw(void)
467{ 482{
468 int chpid, ret; 483 int chpid, ret;
484 struct res_acc_data res_data;
469 struct { 485 struct {
470 struct chsc_header request; 486 struct chsc_header request;
471 u32 reserved1; 487 u32 reserved1;
@@ -499,8 +515,9 @@ chsc_process_crw(void)
499 ret = 0; 515 ret = 0;
500 do { 516 do {
501 int ccode, status; 517 int ccode, status;
518 struct device *dev;
502 memset(sei_area, 0, sizeof(*sei_area)); 519 memset(sei_area, 0, sizeof(*sei_area));
503 520 memset(&res_data, 0, sizeof(struct res_acc_data));
504 sei_area->request = (struct chsc_header) { 521 sei_area->request = (struct chsc_header) {
505 .length = 0x0010, 522 .length = 0x0010,
506 .code = 0x000e, 523 .code = 0x000e,
@@ -573,26 +590,25 @@ chsc_process_crw(void)
573 if (status < 0) 590 if (status < 0)
574 new_channel_path(sei_area->rsid); 591 new_channel_path(sei_area->rsid);
575 else if (!status) 592 else if (!status)
576 return 0; 593 break;
577 if ((sei_area->vf & 0x80) == 0) { 594 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
578 pr_debug("chpid: %x\n", sei_area->rsid); 595 res_data.chp = to_channelpath(dev);
579 ret = s390_process_res_acc(sei_area->rsid, 596 pr_debug("chpid: %x", sei_area->rsid);
580 0, 0); 597 if ((sei_area->vf & 0xc0) != 0) {
581 } else if ((sei_area->vf & 0xc0) == 0x80) { 598 res_data.fla = sei_area->fla;
582 pr_debug("chpid: %x link addr: %x\n", 599 if ((sei_area->vf & 0xc0) == 0xc0) {
583 sei_area->rsid, sei_area->fla); 600 pr_debug(" full link addr: %x",
584 ret = s390_process_res_acc(sei_area->rsid, 601 sei_area->fla);
585 sei_area->fla, 602 res_data.fla_mask = 0xffff;
586 0xff00); 603 } else {
587 } else if ((sei_area->vf & 0xc0) == 0xc0) { 604 pr_debug(" link addr: %x",
588 pr_debug("chpid: %x full link addr: %x\n", 605 sei_area->fla);
589 sei_area->rsid, sei_area->fla); 606 res_data.fla_mask = 0xff00;
590 ret = s390_process_res_acc(sei_area->rsid, 607 }
591 sei_area->fla,
592 0xffff);
593 } 608 }
594 pr_debug("\n"); 609 ret = s390_process_res_acc(&res_data);
595 610 pr_debug("\n\n");
611 put_device(dev);
596 break; 612 break;
597 613
598 default: /* other stuff */ 614 default: /* other stuff */
@@ -604,12 +620,72 @@ chsc_process_crw(void)
604 return ret; 620 return ret;
605} 621}
606 622
623static inline int
624__chp_add_new_sch(struct subchannel_id schid)
625{
626 struct schib schib;
627 int ret;
628
629 if (stsch(schid, &schib))
630 /* We're through */
631 return need_rescan ? -EAGAIN : -ENXIO;
632
633 /* Put it on the slow path. */
634 ret = css_enqueue_subchannel_slow(schid);
635 if (ret) {
636 css_clear_subchannel_slow_list();
637 need_rescan = 1;
638 return -EAGAIN;
639 }
640 return 0;
641}
642
643
607static int 644static int
608chp_add(int chpid) 645__chp_add(struct subchannel_id schid, void *data)
609{ 646{
647 int i;
648 struct channel_path *chp;
610 struct subchannel *sch; 649 struct subchannel *sch;
611 int irq, ret, rc; 650
651 chp = (struct channel_path *)data;
652 sch = get_subchannel_by_schid(schid);
653 if (!sch)
654 /* Check if the subchannel is now available. */
655 return __chp_add_new_sch(schid);
656 spin_lock(&sch->lock);
657 for (i=0; i<8; i++)
658 if (sch->schib.pmcw.chpid[i] == chp->id) {
659 if (stsch(sch->schid, &sch->schib) != 0) {
660 /* Endgame. */
661 spin_unlock(&sch->lock);
662 return -ENXIO;
663 }
664 break;
665 }
666 if (i==8) {
667 spin_unlock(&sch->lock);
668 return 0;
669 }
670 sch->lpm = ((sch->schib.pmcw.pim &
671 sch->schib.pmcw.pam &
672 sch->schib.pmcw.pom)
673 | 0x80 >> i) & sch->opm;
674
675 if (sch->driver && sch->driver->verify)
676 sch->driver->verify(&sch->dev);
677
678 spin_unlock(&sch->lock);
679 put_device(&sch->dev);
680 return 0;
681}
682
683static int
684chp_add(int chpid)
685{
686 int rc;
612 char dbf_txt[15]; 687 char dbf_txt[15];
688 struct device *dev;
613 689
614 if (!get_chp_status(chpid)) 690 if (!get_chp_status(chpid))
615 return 0; /* no need to do the rest */ 691 return 0; /* no need to do the rest */
@@ -617,59 +693,13 @@ chp_add(int chpid)
617 sprintf(dbf_txt, "cadd%x", chpid); 693 sprintf(dbf_txt, "cadd%x", chpid);
618 CIO_TRACE_EVENT(2, dbf_txt); 694 CIO_TRACE_EVENT(2, dbf_txt);
619 695
620 rc = 0; 696 dev = get_device(&css[0]->chps[chpid]->dev);
621 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 697 rc = for_each_subchannel(__chp_add, to_channelpath(dev));
622 int i; 698 if (css_slow_subchannels_exist())
623 699 rc = -EAGAIN;
624 sch = get_subchannel_by_schid(irq); 700 if (rc != -EAGAIN)
625 if (!sch) { 701 rc = 0;
626 struct schib schib; 702 put_device(dev);
627
628 if (stsch(irq, &schib)) {
629 /* We're through */
630 if (need_rescan)
631 rc = -EAGAIN;
632 break;
633 }
634 if (need_rescan) {
635 rc = -EAGAIN;
636 continue;
637 }
638 /* Put it on the slow path. */
639 ret = css_enqueue_subchannel_slow(irq);
640 if (ret) {
641 css_clear_subchannel_slow_list();
642 need_rescan = 1;
643 }
644 rc = -EAGAIN;
645 continue;
646 }
647
648 spin_lock(&sch->lock);
649 for (i=0; i<8; i++)
650 if (sch->schib.pmcw.chpid[i] == chpid) {
651 if (stsch(sch->irq, &sch->schib) != 0) {
652 /* Endgame. */
653 spin_unlock(&sch->lock);
654 return rc;
655 }
656 break;
657 }
658 if (i==8) {
659 spin_unlock(&sch->lock);
660 return rc;
661 }
662 sch->lpm = ((sch->schib.pmcw.pim &
663 sch->schib.pmcw.pam &
664 sch->schib.pmcw.pom)
665 | 0x80 >> i) & sch->opm;
666
667 if (sch->driver && sch->driver->verify)
668 sch->driver->verify(&sch->dev);
669
670 spin_unlock(&sch->lock);
671 put_device(&sch->dev);
672 }
673 return rc; 703 return rc;
674} 704}
675 705
@@ -702,7 +732,7 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
702 if (!device_is_online(sch)) 732 if (!device_is_online(sch))
703 /* cio could be doing I/O. */ 733 /* cio could be doing I/O. */
704 return 0; 734 return 0;
705 cc = stsch(sch->irq, &sch->schib); 735 cc = stsch(sch->schid, &sch->schib);
706 if (cc) 736 if (cc)
707 return 0; 737 return 0;
708 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { 738 if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
@@ -743,7 +773,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
743 * just varied off path. Then kill it. 773 * just varied off path. Then kill it.
744 */ 774 */
745 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) { 775 if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
746 if (css_enqueue_subchannel_slow(sch->irq)) { 776 if (css_enqueue_subchannel_slow(sch->schid)) {
747 css_clear_subchannel_slow_list(); 777 css_clear_subchannel_slow_list();
748 need_rescan = 1; 778 need_rescan = 1;
749 } 779 }
@@ -781,6 +811,29 @@ s390_subchannel_vary_chpid_on(struct device *dev, void *data)
781 return 0; 811 return 0;
782} 812}
783 813
814static int
815__s390_vary_chpid_on(struct subchannel_id schid, void *data)
816{
817 struct schib schib;
818 struct subchannel *sch;
819
820 sch = get_subchannel_by_schid(schid);
821 if (sch) {
822 put_device(&sch->dev);
823 return 0;
824 }
825 if (stsch_err(schid, &schib))
826 /* We're through */
827 return -ENXIO;
828 /* Put it on the slow path. */
829 if (css_enqueue_subchannel_slow(schid)) {
830 css_clear_subchannel_slow_list();
831 need_rescan = 1;
832 return -EAGAIN;
833 }
834 return 0;
835}
836
784/* 837/*
785 * Function: s390_vary_chpid 838 * Function: s390_vary_chpid
786 * Varies the specified chpid online or offline 839 * Varies the specified chpid online or offline
@@ -789,8 +842,7 @@ static int
789s390_vary_chpid( __u8 chpid, int on) 842s390_vary_chpid( __u8 chpid, int on)
790{ 843{
791 char dbf_text[15]; 844 char dbf_text[15];
792 int status, irq, ret; 845 int status;
793 struct subchannel *sch;
794 846
795 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); 847 sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
796 CIO_TRACE_EVENT( 2, dbf_text); 848 CIO_TRACE_EVENT( 2, dbf_text);
@@ -815,30 +867,9 @@ s390_vary_chpid( __u8 chpid, int on)
815 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? 867 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
816 s390_subchannel_vary_chpid_on : 868 s390_subchannel_vary_chpid_on :
817 s390_subchannel_vary_chpid_off); 869 s390_subchannel_vary_chpid_off);
818 if (!on) 870 if (on)
819 goto out; 871 /* Scan for new devices on varied on path. */
820 /* Scan for new devices on varied on path. */ 872 for_each_subchannel(__s390_vary_chpid_on, NULL);
821 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
822 struct schib schib;
823
824 if (need_rescan)
825 break;
826 sch = get_subchannel_by_schid(irq);
827 if (sch) {
828 put_device(&sch->dev);
829 continue;
830 }
831 if (stsch(irq, &schib))
832 /* We're through */
833 break;
834 /* Put it on the slow path. */
835 ret = css_enqueue_subchannel_slow(irq);
836 if (ret) {
837 css_clear_subchannel_slow_list();
838 need_rescan = 1;
839 }
840 }
841out:
842 if (need_rescan || css_slow_subchannels_exist()) 873 if (need_rescan || css_slow_subchannels_exist())
843 queue_work(slow_path_wq, &slow_path_work); 874 queue_work(slow_path_wq, &slow_path_work);
844 return 0; 875 return 0;
@@ -995,7 +1026,7 @@ new_channel_path(int chpid)
995 chp->id = chpid; 1026 chp->id = chpid;
996 chp->state = 1; 1027 chp->state = 1;
997 chp->dev = (struct device) { 1028 chp->dev = (struct device) {
998 .parent = &css_bus_device, 1029 .parent = &css[0]->device,
999 .release = chp_release, 1030 .release = chp_release,
1000 }; 1031 };
1001 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); 1032 snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
@@ -1017,7 +1048,7 @@ new_channel_path(int chpid)
1017 device_unregister(&chp->dev); 1048 device_unregister(&chp->dev);
1018 goto out_free; 1049 goto out_free;
1019 } else 1050 } else
1020 chps[chpid] = chp; 1051 css[0]->chps[chpid] = chp;
1021 return ret; 1052 return ret;
1022out_free: 1053out_free:
1023 kfree(chp); 1054 kfree(chp);
@@ -1030,7 +1061,7 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no)
1030 struct channel_path *chp; 1061 struct channel_path *chp;
1031 struct channel_path_desc *desc; 1062 struct channel_path_desc *desc;
1032 1063
1033 chp = chps[sch->schib.pmcw.chpid[chp_no]]; 1064 chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
1034 if (!chp) 1065 if (!chp)
1035 return NULL; 1066 return NULL;
1036 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); 1067 desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
@@ -1051,6 +1082,54 @@ chsc_alloc_sei_area(void)
1051 return (sei_page ? 0 : -ENOMEM); 1082 return (sei_page ? 0 : -ENOMEM);
1052} 1083}
1053 1084
1085int __init
1086chsc_enable_facility(int operation_code)
1087{
1088 int ret;
1089 struct {
1090 struct chsc_header request;
1091 u8 reserved1:4;
1092 u8 format:4;
1093 u8 reserved2;
1094 u16 operation_code;
1095 u32 reserved3;
1096 u32 reserved4;
1097 u32 operation_data_area[252];
1098 struct chsc_header response;
1099 u32 reserved5:4;
1100 u32 format2:4;
1101 u32 reserved6:24;
1102 } *sda_area;
1103
1104 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1105 if (!sda_area)
1106 return -ENOMEM;
1107 sda_area->request = (struct chsc_header) {
1108 .length = 0x0400,
1109 .code = 0x0031,
1110 };
1111 sda_area->operation_code = operation_code;
1112
1113 ret = chsc(sda_area);
1114 if (ret > 0) {
1115 ret = (ret == 3) ? -ENODEV : -EBUSY;
1116 goto out;
1117 }
1118 switch (sda_area->response.code) {
1119 case 0x0003: /* invalid request block */
1120 case 0x0007:
1121 ret = -EINVAL;
1122 break;
1123 case 0x0004: /* command not provided */
1124 case 0x0101: /* facility not provided */
1125 ret = -EOPNOTSUPP;
1126 break;
1127 }
1128 out:
1129 free_page((unsigned long)sda_area);
1130 return ret;
1131}
1132
1054subsys_initcall(chsc_alloc_sei_area); 1133subsys_initcall(chsc_alloc_sei_area);
1055 1134
1056struct css_general_char css_general_characteristics; 1135struct css_general_char css_general_characteristics;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index be20da49d147..44e4b4bb1c5a 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -1,12 +1,12 @@
1#ifndef S390_CHSC_H 1#ifndef S390_CHSC_H
2#define S390_CHSC_H 2#define S390_CHSC_H
3 3
4#define NR_CHPIDS 256
5
6#define CHSC_SEI_ACC_CHPID 1 4#define CHSC_SEI_ACC_CHPID 1
7#define CHSC_SEI_ACC_LINKADDR 2 5#define CHSC_SEI_ACC_LINKADDR 2
8#define CHSC_SEI_ACC_FULLLINKADDR 3 6#define CHSC_SEI_ACC_FULLLINKADDR 3
9 7
8#define CHSC_SDA_OC_MSS 0x2
9
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
@@ -43,7 +43,9 @@ struct css_general_char {
43 u32 ext_mb : 1; /* bit 48 */ 43 u32 ext_mb : 1; /* bit 48 */
44 u32 : 7; 44 u32 : 7;
45 u32 aif_tdd : 1; /* bit 56 */ 45 u32 aif_tdd : 1; /* bit 56 */
46 u32 : 10; 46 u32 : 1;
47 u32 qebsm : 1; /* bit 58 */
48 u32 : 8;
47 u32 aif_osa : 1; /* bit 67 */ 49 u32 aif_osa : 1; /* bit 67 */
48 u32 : 28; 50 u32 : 28;
49}__attribute__((packed)); 51}__attribute__((packed));
@@ -63,4 +65,9 @@ extern int chsc_determine_css_characteristics(void);
63extern int css_characteristics_avail; 65extern int css_characteristics_avail;
64 66
65extern void *chsc_get_chp_desc(struct subchannel*, int); 67extern void *chsc_get_chp_desc(struct subchannel*, int);
68
69extern int chsc_enable_facility(int);
70
71#define to_channelpath(dev) container_of(dev, struct channel_path, dev)
72
66#endif 73#endif
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 185bc73c3ecd..7376bc87206d 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * $Revision: 1.135 $ 4 * $Revision: 1.138 $
5 * 5 *
6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -135,7 +135,7 @@ cio_tpi(void)
135 return 0; 135 return 0;
136 irb = (struct irb *) __LC_IRB; 136 irb = (struct irb *) __LC_IRB;
137 /* Store interrupt response block to lowcore. */ 137 /* Store interrupt response block to lowcore. */
138 if (tsch (tpi_info->irq, irb) != 0) 138 if (tsch (tpi_info->schid, irb) != 0)
139 /* Not status pending or not operational. */ 139 /* Not status pending or not operational. */
140 return 1; 140 return 1;
141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 141 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -163,10 +163,11 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
163 else 163 else
164 sch->lpm = 0; 164 sch->lpm = 0;
165 165
166 stsch (sch->irq, &sch->schib); 166 stsch (sch->schid, &sch->schib);
167 167
168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for " 168 CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
169 "subchannel %04x!\n", sch->irq); 169 "subchannel 0.%x.%04x!\n", sch->schid.ssid,
170 sch->schid.sch_no);
170 sprintf(dbf_text, "no%s", sch->dev.bus_id); 171 sprintf(dbf_text, "no%s", sch->dev.bus_id);
171 CIO_TRACE_EVENT(0, dbf_text); 172 CIO_TRACE_EVENT(0, dbf_text);
172 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); 173 CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
@@ -194,7 +195,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
194 sch->orb.spnd = sch->options.suspend; 195 sch->orb.spnd = sch->options.suspend;
195 sch->orb.ssic = sch->options.suspend && sch->options.inter; 196 sch->orb.ssic = sch->options.suspend && sch->options.inter;
196 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; 197 sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
197#ifdef CONFIG_ARCH_S390X 198#ifdef CONFIG_64BIT
198 /* 199 /*
199 * for 64 bit we always support 64 bit IDAWs with 4k page size only 200 * for 64 bit we always support 64 bit IDAWs with 4k page size only
200 */ 201 */
@@ -204,7 +205,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
204 sch->orb.key = key >> 4; 205 sch->orb.key = key >> 4;
205 /* issue "Start Subchannel" */ 206 /* issue "Start Subchannel" */
206 sch->orb.cpa = (__u32) __pa (cpa); 207 sch->orb.cpa = (__u32) __pa (cpa);
207 ccode = ssch (sch->irq, &sch->orb); 208 ccode = ssch (sch->schid, &sch->orb);
208 209
209 /* process condition code */ 210 /* process condition code */
210 sprintf (dbf_txt, "ccode:%d", ccode); 211 sprintf (dbf_txt, "ccode:%d", ccode);
@@ -243,7 +244,7 @@ cio_resume (struct subchannel *sch)
243 CIO_TRACE_EVENT (4, "resIO"); 244 CIO_TRACE_EVENT (4, "resIO");
244 CIO_TRACE_EVENT (4, sch->dev.bus_id); 245 CIO_TRACE_EVENT (4, sch->dev.bus_id);
245 246
246 ccode = rsch (sch->irq); 247 ccode = rsch (sch->schid);
247 248
248 sprintf (dbf_txt, "ccode:%d", ccode); 249 sprintf (dbf_txt, "ccode:%d", ccode);
249 CIO_TRACE_EVENT (4, dbf_txt); 250 CIO_TRACE_EVENT (4, dbf_txt);
@@ -283,7 +284,7 @@ cio_halt(struct subchannel *sch)
283 /* 284 /*
284 * Issue "Halt subchannel" and process condition code 285 * Issue "Halt subchannel" and process condition code
285 */ 286 */
286 ccode = hsch (sch->irq); 287 ccode = hsch (sch->schid);
287 288
288 sprintf (dbf_txt, "ccode:%d", ccode); 289 sprintf (dbf_txt, "ccode:%d", ccode);
289 CIO_TRACE_EVENT (2, dbf_txt); 290 CIO_TRACE_EVENT (2, dbf_txt);
@@ -318,7 +319,7 @@ cio_clear(struct subchannel *sch)
318 /* 319 /*
319 * Issue "Clear subchannel" and process condition code 320 * Issue "Clear subchannel" and process condition code
320 */ 321 */
321 ccode = csch (sch->irq); 322 ccode = csch (sch->schid);
322 323
323 sprintf (dbf_txt, "ccode:%d", ccode); 324 sprintf (dbf_txt, "ccode:%d", ccode);
324 CIO_TRACE_EVENT (2, dbf_txt); 325 CIO_TRACE_EVENT (2, dbf_txt);
@@ -351,7 +352,7 @@ cio_cancel (struct subchannel *sch)
351 CIO_TRACE_EVENT (2, "cancelIO"); 352 CIO_TRACE_EVENT (2, "cancelIO");
352 CIO_TRACE_EVENT (2, sch->dev.bus_id); 353 CIO_TRACE_EVENT (2, sch->dev.bus_id);
353 354
354 ccode = xsch (sch->irq); 355 ccode = xsch (sch->schid);
355 356
356 sprintf (dbf_txt, "ccode:%d", ccode); 357 sprintf (dbf_txt, "ccode:%d", ccode);
357 CIO_TRACE_EVENT (2, dbf_txt); 358 CIO_TRACE_EVENT (2, dbf_txt);
@@ -359,7 +360,7 @@ cio_cancel (struct subchannel *sch)
359 switch (ccode) { 360 switch (ccode) {
360 case 0: /* success */ 361 case 0: /* success */
361 /* Update information in scsw. */ 362 /* Update information in scsw. */
362 stsch (sch->irq, &sch->schib); 363 stsch (sch->schid, &sch->schib);
363 return 0; 364 return 0;
364 case 1: /* status pending */ 365 case 1: /* status pending */
365 return -EBUSY; 366 return -EBUSY;
@@ -381,7 +382,7 @@ cio_modify (struct subchannel *sch)
381 382
382 ret = 0; 383 ret = 0;
383 for (retry = 0; retry < 5; retry++) { 384 for (retry = 0; retry < 5; retry++) {
384 ccode = msch_err (sch->irq, &sch->schib); 385 ccode = msch_err (sch->schid, &sch->schib);
385 if (ccode < 0) /* -EIO if msch gets a program check. */ 386 if (ccode < 0) /* -EIO if msch gets a program check. */
386 return ccode; 387 return ccode;
387 switch (ccode) { 388 switch (ccode) {
@@ -414,7 +415,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
414 CIO_TRACE_EVENT (2, "ensch"); 415 CIO_TRACE_EVENT (2, "ensch");
415 CIO_TRACE_EVENT (2, sch->dev.bus_id); 416 CIO_TRACE_EVENT (2, sch->dev.bus_id);
416 417
417 ccode = stsch (sch->irq, &sch->schib); 418 ccode = stsch (sch->schid, &sch->schib);
418 if (ccode) 419 if (ccode)
419 return -ENODEV; 420 return -ENODEV;
420 421
@@ -432,13 +433,13 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
432 */ 433 */
433 sch->schib.pmcw.csense = 0; 434 sch->schib.pmcw.csense = 0;
434 if (ret == 0) { 435 if (ret == 0) {
435 stsch (sch->irq, &sch->schib); 436 stsch (sch->schid, &sch->schib);
436 if (sch->schib.pmcw.ena) 437 if (sch->schib.pmcw.ena)
437 break; 438 break;
438 } 439 }
439 if (ret == -EBUSY) { 440 if (ret == -EBUSY) {
440 struct irb irb; 441 struct irb irb;
441 if (tsch(sch->irq, &irb) != 0) 442 if (tsch(sch->schid, &irb) != 0)
442 break; 443 break;
443 } 444 }
444 } 445 }
@@ -461,7 +462,7 @@ cio_disable_subchannel (struct subchannel *sch)
461 CIO_TRACE_EVENT (2, "dissch"); 462 CIO_TRACE_EVENT (2, "dissch");
462 CIO_TRACE_EVENT (2, sch->dev.bus_id); 463 CIO_TRACE_EVENT (2, sch->dev.bus_id);
463 464
464 ccode = stsch (sch->irq, &sch->schib); 465 ccode = stsch (sch->schid, &sch->schib);
465 if (ccode == 3) /* Not operational. */ 466 if (ccode == 3) /* Not operational. */
466 return -ENODEV; 467 return -ENODEV;
467 468
@@ -485,7 +486,7 @@ cio_disable_subchannel (struct subchannel *sch)
485 */ 486 */
486 break; 487 break;
487 if (ret == 0) { 488 if (ret == 0) {
488 stsch (sch->irq, &sch->schib); 489 stsch (sch->schid, &sch->schib);
489 if (!sch->schib.pmcw.ena) 490 if (!sch->schib.pmcw.ena)
490 break; 491 break;
491 } 492 }
@@ -508,12 +509,12 @@ cio_disable_subchannel (struct subchannel *sch)
508 * -ENODEV for subchannels with invalid device number or blacklisted devices 509 * -ENODEV for subchannels with invalid device number or blacklisted devices
509 */ 510 */
510int 511int
511cio_validate_subchannel (struct subchannel *sch, unsigned int irq) 512cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
512{ 513{
513 char dbf_txt[15]; 514 char dbf_txt[15];
514 int ccode; 515 int ccode;
515 516
516 sprintf (dbf_txt, "valsch%x", irq); 517 sprintf (dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 518 CIO_TRACE_EVENT (4, dbf_txt);
518 519
519 /* Nuke all fields. */ 520 /* Nuke all fields. */
@@ -522,17 +523,20 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
522 spin_lock_init(&sch->lock); 523 spin_lock_init(&sch->lock);
523 524
524 /* Set a name for the subchannel */ 525 /* Set a name for the subchannel */
525 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq); 526 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
527 schid.sch_no);
526 528
527 /* 529 /*
528 * The first subchannel that is not-operational (ccode==3) 530 * The first subchannel that is not-operational (ccode==3)
529 * indicates that there aren't any more devices available. 531 * indicates that there aren't any more devices available.
532 * If stsch gets an exception, it means the current subchannel set
533 * is not valid.
530 */ 534 */
531 sch->irq = irq; 535 ccode = stsch_err (schid, &sch->schib);
532 ccode = stsch (irq, &sch->schib);
533 if (ccode) 536 if (ccode)
534 return -ENXIO; 537 return (ccode == 3) ? -ENXIO : ccode;
535 538
539 sch->schid = schid;
536 /* Copy subchannel type from path management control word. */ 540 /* Copy subchannel type from path management control word. */
537 sch->st = sch->schib.pmcw.st; 541 sch->st = sch->schib.pmcw.st;
538 542
@@ -541,9 +545,9 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
541 */ 545 */
542 if (sch->st != 0) { 546 if (sch->st != 0) {
543 CIO_DEBUG(KERN_INFO, 0, 547 CIO_DEBUG(KERN_INFO, 0,
544 "Subchannel %04X reports " 548 "Subchannel 0.%x.%04x reports "
545 "non-I/O subchannel type %04X\n", 549 "non-I/O subchannel type %04X\n",
546 sch->irq, sch->st); 550 sch->schid.ssid, sch->schid.sch_no, sch->st);
547 /* We stop here for non-io subchannels. */ 551 /* We stop here for non-io subchannels. */
548 return sch->st; 552 return sch->st;
549 } 553 }
@@ -554,26 +558,29 @@ cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
554 return -ENODEV; 558 return -ENODEV;
555 559
556 /* Devno is valid. */ 560 /* Devno is valid. */
557 if (is_blacklisted (sch->schib.pmcw.dev)) { 561 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
558 /* 562 /*
559 * This device must not be known to Linux. So we simply 563 * This device must not be known to Linux. So we simply
560 * say that there is no device and return ENODEV. 564 * say that there is no device and return ENODEV.
561 */ 565 */
562 CIO_MSG_EVENT(0, "Blacklisted device detected " 566 CIO_MSG_EVENT(0, "Blacklisted device detected "
563 "at devno %04X\n", sch->schib.pmcw.dev); 567 "at devno %04X, subchannel set %x\n",
568 sch->schib.pmcw.dev, sch->schid.ssid);
564 return -ENODEV; 569 return -ENODEV;
565 } 570 }
566 sch->opm = 0xff; 571 sch->opm = 0xff;
567 chsc_validate_chpids(sch); 572 if (!cio_is_console(sch->schid))
573 chsc_validate_chpids(sch);
568 sch->lpm = sch->schib.pmcw.pim & 574 sch->lpm = sch->schib.pmcw.pim &
569 sch->schib.pmcw.pam & 575 sch->schib.pmcw.pam &
570 sch->schib.pmcw.pom & 576 sch->schib.pmcw.pom &
571 sch->opm; 577 sch->opm;
572 578
573 CIO_DEBUG(KERN_INFO, 0, 579 CIO_DEBUG(KERN_INFO, 0,
574 "Detected device %04X on subchannel %04X" 580 "Detected device %04x on subchannel 0.%x.%04X"
575 " - PIM = %02X, PAM = %02X, POM = %02X\n", 581 " - PIM = %02X, PAM = %02X, POM = %02X\n",
576 sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim, 582 sch->schib.pmcw.dev, sch->schid.ssid,
583 sch->schid.sch_no, sch->schib.pmcw.pim,
577 sch->schib.pmcw.pam, sch->schib.pmcw.pom); 584 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
578 585
579 /* 586 /*
@@ -632,7 +639,7 @@ do_IRQ (struct pt_regs *regs)
632 if (sch) 639 if (sch)
633 spin_lock(&sch->lock); 640 spin_lock(&sch->lock);
634 /* Store interrupt response block to lowcore. */ 641 /* Store interrupt response block to lowcore. */
635 if (tsch (tpi_info->irq, irb) == 0 && sch) { 642 if (tsch (tpi_info->schid, irb) == 0 && sch) {
636 /* Keep subchannel information word up to date. */ 643 /* Keep subchannel information word up to date. */
637 memcpy (&sch->schib.scsw, &irb->scsw, 644 memcpy (&sch->schib.scsw, &irb->scsw,
638 sizeof (irb->scsw)); 645 sizeof (irb->scsw));
@@ -691,28 +698,36 @@ wait_cons_dev (void)
691} 698}
692 699
693static int 700static int
694cio_console_irq(void) 701cio_test_for_console(struct subchannel_id schid, void *data)
695{ 702{
696 int irq; 703 if (stsch_err(schid, &console_subchannel.schib) != 0)
704 return -ENXIO;
705 if (console_subchannel.schib.pmcw.dnv &&
706 console_subchannel.schib.pmcw.dev ==
707 console_devno) {
708 console_irq = schid.sch_no;
709 return 1; /* found */
710 }
711 return 0;
712}
713
714
715static int
716cio_get_console_sch_no(void)
717{
718 struct subchannel_id schid;
697 719
720 init_subchannel_id(&schid);
698 if (console_irq != -1) { 721 if (console_irq != -1) {
699 /* VM provided us with the irq number of the console. */ 722 /* VM provided us with the irq number of the console. */
700 if (stsch(console_irq, &console_subchannel.schib) != 0 || 723 schid.sch_no = console_irq;
724 if (stsch(schid, &console_subchannel.schib) != 0 ||
701 !console_subchannel.schib.pmcw.dnv) 725 !console_subchannel.schib.pmcw.dnv)
702 return -1; 726 return -1;
703 console_devno = console_subchannel.schib.pmcw.dev; 727 console_devno = console_subchannel.schib.pmcw.dev;
704 } else if (console_devno != -1) { 728 } else if (console_devno != -1) {
705 /* At least the console device number is known. */ 729 /* At least the console device number is known. */
706 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 730 for_each_subchannel(cio_test_for_console, NULL);
707 if (stsch(irq, &console_subchannel.schib) != 0)
708 break;
709 if (console_subchannel.schib.pmcw.dnv &&
710 console_subchannel.schib.pmcw.dev ==
711 console_devno) {
712 console_irq = irq;
713 break;
714 }
715 }
716 if (console_irq == -1) 731 if (console_irq == -1)
717 return -1; 732 return -1;
718 } else { 733 } else {
@@ -728,17 +743,20 @@ cio_console_irq(void)
728struct subchannel * 743struct subchannel *
729cio_probe_console(void) 744cio_probe_console(void)
730{ 745{
731 int irq, ret; 746 int sch_no, ret;
747 struct subchannel_id schid;
732 748
733 if (xchg(&console_subchannel_in_use, 1) != 0) 749 if (xchg(&console_subchannel_in_use, 1) != 0)
734 return ERR_PTR(-EBUSY); 750 return ERR_PTR(-EBUSY);
735 irq = cio_console_irq(); 751 sch_no = cio_get_console_sch_no();
736 if (irq == -1) { 752 if (sch_no == -1) {
737 console_subchannel_in_use = 0; 753 console_subchannel_in_use = 0;
738 return ERR_PTR(-ENODEV); 754 return ERR_PTR(-ENODEV);
739 } 755 }
740 memset(&console_subchannel, 0, sizeof(struct subchannel)); 756 memset(&console_subchannel, 0, sizeof(struct subchannel));
741 ret = cio_validate_subchannel(&console_subchannel, irq); 757 init_subchannel_id(&schid);
758 schid.sch_no = sch_no;
759 ret = cio_validate_subchannel(&console_subchannel, schid);
742 if (ret) { 760 if (ret) {
743 console_subchannel_in_use = 0; 761 console_subchannel_in_use = 0;
744 return ERR_PTR(-ENODEV); 762 return ERR_PTR(-ENODEV);
@@ -770,11 +788,11 @@ cio_release_console(void)
770 788
771/* Bah... hack to catch console special sausages. */ 789/* Bah... hack to catch console special sausages. */
772int 790int
773cio_is_console(int irq) 791cio_is_console(struct subchannel_id schid)
774{ 792{
775 if (!console_subchannel_in_use) 793 if (!console_subchannel_in_use)
776 return 0; 794 return 0;
777 return (irq == console_subchannel.irq); 795 return schid_equal(&schid, &console_subchannel.schid);
778} 796}
779 797
780struct subchannel * 798struct subchannel *
@@ -787,7 +805,7 @@ cio_get_console_subchannel(void)
787 805
788#endif 806#endif
789static inline int 807static inline int
790__disable_subchannel_easy(unsigned int schid, struct schib *schib) 808__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
791{ 809{
792 int retry, cc; 810 int retry, cc;
793 811
@@ -805,7 +823,7 @@ __disable_subchannel_easy(unsigned int schid, struct schib *schib)
805} 823}
806 824
807static inline int 825static inline int
808__clear_subchannel_easy(unsigned int schid) 826__clear_subchannel_easy(struct subchannel_id schid)
809{ 827{
810 int retry; 828 int retry;
811 829
@@ -815,8 +833,8 @@ __clear_subchannel_easy(unsigned int schid)
815 struct tpi_info ti; 833 struct tpi_info ti;
816 834
817 if (tpi(&ti)) { 835 if (tpi(&ti)) {
818 tsch(ti.irq, (struct irb *)__LC_IRB); 836 tsch(ti.schid, (struct irb *)__LC_IRB);
819 if (ti.irq == schid) 837 if (schid_equal(&ti.schid, &schid))
820 return 0; 838 return 0;
821 } 839 }
822 udelay(100); 840 udelay(100);
@@ -825,31 +843,33 @@ __clear_subchannel_easy(unsigned int schid)
825} 843}
826 844
827extern void do_reipl(unsigned long devno); 845extern void do_reipl(unsigned long devno);
846static int
847__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
848{
849 struct schib schib;
850
851 if (stsch_err(schid, &schib))
852 return -ENXIO;
853 if (!schib.pmcw.ena)
854 return 0;
855 switch(__disable_subchannel_easy(schid, &schib)) {
856 case 0:
857 case -ENODEV:
858 break;
859 default: /* -EBUSY */
860 if (__clear_subchannel_easy(schid))
861 break; /* give up... */
862 stsch(schid, &schib);
863 __disable_subchannel_easy(schid, &schib);
864 }
865 return 0;
866}
828 867
829/* Clear all subchannels. */
830void 868void
831clear_all_subchannels(void) 869clear_all_subchannels(void)
832{ 870{
833 unsigned int schid;
834
835 local_irq_disable(); 871 local_irq_disable();
836 for (schid=0;schid<=highest_subchannel;schid++) { 872 for_each_subchannel(__shutdown_subchannel_easy, NULL);
837 struct schib schib;
838 if (stsch(schid, &schib))
839 break; /* break out of the loop */
840 if (!schib.pmcw.ena)
841 continue;
842 switch(__disable_subchannel_easy(schid, &schib)) {
843 case 0:
844 case -ENODEV:
845 break;
846 default: /* -EBUSY */
847 if (__clear_subchannel_easy(schid))
848 break; /* give up... jump out of switch */
849 stsch(schid, &schib);
850 __disable_subchannel_easy(schid, &schib);
851 }
852 }
853} 873}
854 874
855/* Make sure all subchannels are quiet before we re-ipl an lpar. */ 875/* Make sure all subchannels are quiet before we re-ipl an lpar. */
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index c50a9da420a9..0ca987344e07 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -1,6 +1,8 @@
1#ifndef S390_CIO_H 1#ifndef S390_CIO_H
2#define S390_CIO_H 2#define S390_CIO_H
3 3
4#include "schid.h"
5
4/* 6/*
5 * where we put the ssd info 7 * where we put the ssd info
6 */ 8 */
@@ -83,7 +85,7 @@ struct orb {
83 85
84/* subchannel data structure used by I/O subroutines */ 86/* subchannel data structure used by I/O subroutines */
85struct subchannel { 87struct subchannel {
86 unsigned int irq; /* aka. subchannel number */ 88 struct subchannel_id schid;
87 spinlock_t lock; /* subchannel lock */ 89 spinlock_t lock; /* subchannel lock */
88 90
89 enum { 91 enum {
@@ -114,7 +116,7 @@ struct subchannel {
114 116
115#define to_subchannel(n) container_of(n, struct subchannel, dev) 117#define to_subchannel(n) container_of(n, struct subchannel, dev)
116 118
117extern int cio_validate_subchannel (struct subchannel *, unsigned int); 119extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
118extern int cio_enable_subchannel (struct subchannel *, unsigned int); 120extern int cio_enable_subchannel (struct subchannel *, unsigned int);
119extern int cio_disable_subchannel (struct subchannel *); 121extern int cio_disable_subchannel (struct subchannel *);
120extern int cio_cancel (struct subchannel *); 122extern int cio_cancel (struct subchannel *);
@@ -127,14 +129,15 @@ extern int cio_cancel (struct subchannel *);
127extern int cio_set_options (struct subchannel *, int); 129extern int cio_set_options (struct subchannel *, int);
128extern int cio_get_options (struct subchannel *); 130extern int cio_get_options (struct subchannel *);
129extern int cio_modify (struct subchannel *); 131extern int cio_modify (struct subchannel *);
132
130/* Use with care. */ 133/* Use with care. */
131#ifdef CONFIG_CCW_CONSOLE 134#ifdef CONFIG_CCW_CONSOLE
132extern struct subchannel *cio_probe_console(void); 135extern struct subchannel *cio_probe_console(void);
133extern void cio_release_console(void); 136extern void cio_release_console(void);
134extern int cio_is_console(int irq); 137extern int cio_is_console(struct subchannel_id);
135extern struct subchannel *cio_get_console_subchannel(void); 138extern struct subchannel *cio_get_console_subchannel(void);
136#else 139#else
137#define cio_is_console(irq) 0 140#define cio_is_console(schid) 0
138#define cio_get_console_subchannel() NULL 141#define cio_get_console_subchannel() NULL
139#endif 142#endif
140 143
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index b978f7fe8327..0b03714e696a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) 2 * linux/drivers/s390/cio/cmf.c ($Revision: 1.19 $)
3 * 3 *
4 * Linux on zSeries Channel Measurement Facility support 4 * Linux on zSeries Channel Measurement Facility support
5 * 5 *
@@ -178,7 +178,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
178 /* msch can silently fail, so do it again if necessary */ 178 /* msch can silently fail, so do it again if necessary */
179 for (retry = 0; retry < 3; retry++) { 179 for (retry = 0; retry < 3; retry++) {
180 /* prepare schib */ 180 /* prepare schib */
181 stsch(sch->irq, schib); 181 stsch(sch->schid, schib);
182 schib->pmcw.mme = mme; 182 schib->pmcw.mme = mme;
183 schib->pmcw.mbfc = mbfc; 183 schib->pmcw.mbfc = mbfc;
184 /* address can be either a block address or a block index */ 184 /* address can be either a block address or a block index */
@@ -188,7 +188,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
188 schib->pmcw.mbi = address; 188 schib->pmcw.mbi = address;
189 189
190 /* try to submit it */ 190 /* try to submit it */
191 switch(ret = msch_err(sch->irq, schib)) { 191 switch(ret = msch_err(sch->schid, schib)) {
192 case 0: 192 case 0:
193 break; 193 break;
194 case 1: 194 case 1:
@@ -202,7 +202,7 @@ set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
202 ret = -EINVAL; 202 ret = -EINVAL;
203 break; 203 break;
204 } 204 }
205 stsch(sch->irq, schib); /* restore the schib */ 205 stsch(sch->schid, schib); /* restore the schib */
206 206
207 if (ret) 207 if (ret)
208 break; 208 break;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc27..e565193650c7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * $Revision: 1.85 $ 4 * $Revision: 1.93 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -21,19 +21,35 @@
21#include "ioasm.h" 21#include "ioasm.h"
22#include "chsc.h" 22#include "chsc.h"
23 23
24unsigned int highest_subchannel;
25int need_rescan = 0; 24int need_rescan = 0;
26int css_init_done = 0; 25int css_init_done = 0;
26static int max_ssid = 0;
27
28struct channel_subsystem *css[__MAX_CSSID + 1];
27 29
28struct pgid global_pgid;
29int css_characteristics_avail = 0; 30int css_characteristics_avail = 0;
30 31
31struct device css_bus_device = { 32inline int
32 .bus_id = "css0", 33for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
33}; 34{
35 struct subchannel_id schid;
36 int ret;
37
38 init_subchannel_id(&schid);
39 ret = -ENODEV;
40 do {
41 do {
42 ret = fn(schid, data);
43 if (ret)
44 break;
45 } while (schid.sch_no++ < __MAX_SUBCHANNEL);
46 schid.sch_no = 0;
47 } while (schid.ssid++ < max_ssid);
48 return ret;
49}
34 50
35static struct subchannel * 51static struct subchannel *
36css_alloc_subchannel(int irq) 52css_alloc_subchannel(struct subchannel_id schid)
37{ 53{
38 struct subchannel *sch; 54 struct subchannel *sch;
39 int ret; 55 int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
41 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); 57 sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
42 if (sch == NULL) 58 if (sch == NULL)
43 return ERR_PTR(-ENOMEM); 59 return ERR_PTR(-ENOMEM);
44 ret = cio_validate_subchannel (sch, irq); 60 ret = cio_validate_subchannel (sch, schid);
45 if (ret < 0) { 61 if (ret < 0) {
46 kfree(sch); 62 kfree(sch);
47 return ERR_PTR(ret); 63 return ERR_PTR(ret);
48 } 64 }
49 if (irq > highest_subchannel)
50 highest_subchannel = irq;
51 65
52 if (sch->st != SUBCHANNEL_TYPE_IO) { 66 if (sch->st != SUBCHANNEL_TYPE_IO) {
53 /* For now we ignore all non-io subchannels. */ 67 /* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
87 struct subchannel *sch; 101 struct subchannel *sch;
88 102
89 sch = to_subchannel(dev); 103 sch = to_subchannel(dev);
90 if (!cio_is_console(sch->irq)) 104 if (!cio_is_console(sch->schid))
91 kfree(sch); 105 kfree(sch);
92} 106}
93 107
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
99 int ret; 113 int ret;
100 114
101 /* Initialize the subchannel structure */ 115 /* Initialize the subchannel structure */
102 sch->dev.parent = &css_bus_device; 116 sch->dev.parent = &css[0]->device;
103 sch->dev.bus = &css_bus_type; 117 sch->dev.bus = &css_bus_type;
104 sch->dev.release = &css_subchannel_release; 118 sch->dev.release = &css_subchannel_release;
105 119
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
114} 128}
115 129
116int 130int
117css_probe_device(int irq) 131css_probe_device(struct subchannel_id schid)
118{ 132{
119 int ret; 133 int ret;
120 struct subchannel *sch; 134 struct subchannel *sch;
121 135
122 sch = css_alloc_subchannel(irq); 136 sch = css_alloc_subchannel(schid);
123 if (IS_ERR(sch)) 137 if (IS_ERR(sch))
124 return PTR_ERR(sch); 138 return PTR_ERR(sch);
125 ret = css_register_subchannel(sch); 139 ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
132check_subchannel(struct device * dev, void * data) 146check_subchannel(struct device * dev, void * data)
133{ 147{
134 struct subchannel *sch; 148 struct subchannel *sch;
135 int irq = (unsigned long)data; 149 struct subchannel_id *schid = data;
136 150
137 sch = to_subchannel(dev); 151 sch = to_subchannel(dev);
138 return (sch->irq == irq); 152 return schid_equal(&sch->schid, schid);
139} 153}
140 154
141struct subchannel * 155struct subchannel *
142get_subchannel_by_schid(int irq) 156get_subchannel_by_schid(struct subchannel_id schid)
143{ 157{
144 struct device *dev; 158 struct device *dev;
145 159
146 dev = bus_find_device(&css_bus_type, NULL, 160 dev = bus_find_device(&css_bus_type, NULL,
147 (void *)(unsigned long)irq, check_subchannel); 161 (void *)&schid, check_subchannel);
148 162
149 return dev ? to_subchannel(dev) : NULL; 163 return dev ? to_subchannel(dev) : NULL;
150} 164}
151 165
152 166
153static inline int 167static inline int
154css_get_subchannel_status(struct subchannel *sch, int schid) 168css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
155{ 169{
156 struct schib schib; 170 struct schib schib;
157 int cc; 171 int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
170} 184}
171 185
172static int 186static int
173css_evaluate_subchannel(int irq, int slow) 187css_evaluate_subchannel(struct subchannel_id schid, int slow)
174{ 188{
175 int event, ret, disc; 189 int event, ret, disc;
176 struct subchannel *sch; 190 struct subchannel *sch;
177 unsigned long flags; 191 unsigned long flags;
178 192
179 sch = get_subchannel_by_schid(irq); 193 sch = get_subchannel_by_schid(schid);
180 disc = sch ? device_is_disconnected(sch) : 0; 194 disc = sch ? device_is_disconnected(sch) : 0;
181 if (disc && slow) { 195 if (disc && slow) {
182 if (sch) 196 if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
194 put_device(&sch->dev); 208 put_device(&sch->dev);
195 return -EAGAIN; /* Will be done on the slow path. */ 209 return -EAGAIN; /* Will be done on the slow path. */
196 } 210 }
197 event = css_get_subchannel_status(sch, irq); 211 event = css_get_subchannel_status(sch, schid);
198 CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n", 212 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
199 irq, event, sch?(disc?"disconnected":"normal"):"unknown", 213 schid.ssid, schid.sch_no, event,
214 sch?(disc?"disconnected":"normal"):"unknown",
200 slow?"slow":"fast"); 215 slow?"slow":"fast");
201 switch (event) { 216 switch (event) {
202 case CIO_NO_PATH: 217 case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
253 sch->schib.pmcw.intparm = 0; 268 sch->schib.pmcw.intparm = 0;
254 cio_modify(sch); 269 cio_modify(sch);
255 put_device(&sch->dev); 270 put_device(&sch->dev);
256 ret = css_probe_device(irq); 271 ret = css_probe_device(schid);
257 } else { 272 } else {
258 /* 273 /*
259 * We can't immediately deregister the disconnected 274 * We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
272 device_trigger_reprobe(sch); 287 device_trigger_reprobe(sch);
273 spin_unlock_irqrestore(&sch->lock, flags); 288 spin_unlock_irqrestore(&sch->lock, flags);
274 } 289 }
275 ret = sch ? 0 : css_probe_device(irq); 290 ret = sch ? 0 : css_probe_device(schid);
276 break; 291 break;
277 default: 292 default:
278 BUG(); 293 BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
281 return ret; 296 return ret;
282} 297}
283 298
284static void 299static int
285css_rescan_devices(void) 300css_rescan_devices(struct subchannel_id schid, void *data)
286{ 301{
287 int irq, ret; 302 return css_evaluate_subchannel(schid, 1);
288
289 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
290 ret = css_evaluate_subchannel(irq, 1);
291 /* No more memory. It doesn't make sense to continue. No
292 * panic because this can happen in midflight and just
293 * because we can't use a new device is no reason to crash
294 * the system. */
295 if (ret == -ENOMEM)
296 break;
297 /* -ENXIO indicates that there are no more subchannels. */
298 if (ret == -ENXIO)
299 break;
300 }
301} 303}
302 304
303struct slow_subchannel { 305struct slow_subchannel {
304 struct list_head slow_list; 306 struct list_head slow_list;
305 unsigned long schid; 307 struct subchannel_id schid;
306}; 308};
307 309
308static LIST_HEAD(slow_subchannels_head); 310static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
315 317
316 if (need_rescan) { 318 if (need_rescan) {
317 need_rescan = 0; 319 need_rescan = 0;
318 css_rescan_devices(); 320 for_each_subchannel(css_rescan_devices, NULL);
319 return; 321 return;
320 } 322 }
321 323
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
354 * Called from the machine check handler for subchannel report words. 356 * Called from the machine check handler for subchannel report words.
355 */ 357 */
356int 358int
357css_process_crw(int irq) 359css_process_crw(int rsid1, int rsid2)
358{ 360{
359 int ret; 361 int ret;
362 struct subchannel_id mchk_schid;
360 363
361 CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); 364 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
365 rsid1, rsid2);
362 366
363 if (need_rescan) 367 if (need_rescan)
364 /* We need to iterate all subchannels anyway. */ 368 /* We need to iterate all subchannels anyway. */
365 return -EAGAIN; 369 return -EAGAIN;
370
371 init_subchannel_id(&mchk_schid);
372 mchk_schid.sch_no = rsid1;
373 if (rsid2 != 0)
374 mchk_schid.ssid = (rsid2 >> 8) & 3;
375
366 /* 376 /*
367 * Since we are always presented with IPI in the CRW, we have to 377 * Since we are always presented with IPI in the CRW, we have to
368 * use stsch() to find out if the subchannel in question has come 378 * use stsch() to find out if the subchannel in question has come
369 * or gone. 379 * or gone.
370 */ 380 */
371 ret = css_evaluate_subchannel(irq, 0); 381 ret = css_evaluate_subchannel(mchk_schid, 0);
372 if (ret == -EAGAIN) { 382 if (ret == -EAGAIN) {
373 if (css_enqueue_subchannel_slow(irq)) { 383 if (css_enqueue_subchannel_slow(mchk_schid)) {
374 css_clear_subchannel_slow_list(); 384 css_clear_subchannel_slow_list();
375 need_rescan = 1; 385 need_rescan = 1;
376 } 386 }
@@ -378,22 +388,83 @@ css_process_crw(int irq)
378 return ret; 388 return ret;
379} 389}
380 390
381static void __init 391static int __init
382css_generate_pgid(void) 392__init_channel_subsystem(struct subchannel_id schid, void *data)
383{ 393{
384 /* Let's build our path group ID here. */ 394 struct subchannel *sch;
385 if (css_characteristics_avail && css_general_characteristics.mcss) 395 int ret;
386 global_pgid.cpu_addr = 0x8000; 396
397 if (cio_is_console(schid))
398 sch = cio_get_console_subchannel();
387 else { 399 else {
400 sch = css_alloc_subchannel(schid);
401 if (IS_ERR(sch))
402 ret = PTR_ERR(sch);
403 else
404 ret = 0;
405 switch (ret) {
406 case 0:
407 break;
408 case -ENOMEM:
409 panic("Out of memory in init_channel_subsystem\n");
410 /* -ENXIO: no more subchannels. */
411 case -ENXIO:
412 return ret;
413 default:
414 return 0;
415 }
416 }
417 /*
418 * We register ALL valid subchannels in ioinfo, even those
419 * that have been present before init_channel_subsystem.
420 * These subchannels can't have been registered yet (kmalloc
421 * not working) so we do it now. This is true e.g. for the
422 * console subchannel.
423 */
424 css_register_subchannel(sch);
425 return 0;
426}
427
428static void __init
429css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
430{
431 if (css_characteristics_avail && css_general_characteristics.mcss) {
432 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
433 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
434 } else {
388#ifdef CONFIG_SMP 435#ifdef CONFIG_SMP
389 global_pgid.cpu_addr = hard_smp_processor_id(); 436 css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
390#else 437#else
391 global_pgid.cpu_addr = 0; 438 css->global_pgid.pgid_high.cpu_addr = 0;
392#endif 439#endif
393 } 440 }
394 global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; 441 css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
395 global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; 442 css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
396 global_pgid.tod_high = (__u32) (get_clock() >> 32); 443 css->global_pgid.tod_high = tod_high;
444
445}
446
447static void
448channel_subsystem_release(struct device *dev)
449{
450 struct channel_subsystem *css;
451
452 css = to_css(dev);
453 kfree(css);
454}
455
456static inline void __init
457setup_css(int nr)
458{
459 u32 tod_high;
460
461 memset(css[nr], 0, sizeof(struct channel_subsystem));
462 css[nr]->valid = 1;
463 css[nr]->cssid = nr;
464 sprintf(css[nr]->device.bus_id, "css%x", nr);
465 css[nr]->device.release = channel_subsystem_release;
466 tod_high = (u32) (get_clock() >> 32);
467 css_generate_pgid(css[nr], tod_high);
397} 468}
398 469
399/* 470/*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
404static int __init 475static int __init
405init_channel_subsystem (void) 476init_channel_subsystem (void)
406{ 477{
407 int ret, irq; 478 int ret, i;
408 479
409 if (chsc_determine_css_characteristics() == 0) 480 if (chsc_determine_css_characteristics() == 0)
410 css_characteristics_avail = 1; 481 css_characteristics_avail = 1;
411 482
412 css_generate_pgid();
413
414 if ((ret = bus_register(&css_bus_type))) 483 if ((ret = bus_register(&css_bus_type)))
415 goto out; 484 goto out;
416 if ((ret = device_register (&css_bus_device)))
417 goto out_bus;
418 485
486 /* Try to enable MSS. */
487 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
488 switch (ret) {
489 case 0: /* Success. */
490 max_ssid = __MAX_SSID;
491 break;
492 case -ENOMEM:
493 goto out_bus;
494 default:
495 max_ssid = 0;
496 }
497 /* Setup css structure. */
498 for (i = 0; i <= __MAX_CSSID; i++) {
499 css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
500 if (!css[i]) {
501 ret = -ENOMEM;
502 goto out_unregister;
503 }
504 setup_css(i);
505 ret = device_register(&css[i]->device);
506 if (ret)
507 goto out_free;
508 }
419 css_init_done = 1; 509 css_init_done = 1;
420 510
421 ctl_set_bit(6, 28); 511 ctl_set_bit(6, 28);
422 512
423 for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { 513 for_each_subchannel(__init_channel_subsystem, NULL);
424 struct subchannel *sch;
425
426 if (cio_is_console(irq))
427 sch = cio_get_console_subchannel();
428 else {
429 sch = css_alloc_subchannel(irq);
430 if (IS_ERR(sch))
431 ret = PTR_ERR(sch);
432 else
433 ret = 0;
434 if (ret == -ENOMEM)
435 panic("Out of memory in "
436 "init_channel_subsystem\n");
437 /* -ENXIO: no more subchannels. */
438 if (ret == -ENXIO)
439 break;
440 if (ret)
441 continue;
442 }
443 /*
444 * We register ALL valid subchannels in ioinfo, even those
445 * that have been present before init_channel_subsystem.
446 * These subchannels can't have been registered yet (kmalloc
447 * not working) so we do it now. This is true e.g. for the
448 * console subchannel.
449 */
450 css_register_subchannel(sch);
451 }
452 return 0; 514 return 0;
453 515out_free:
516 kfree(css[i]);
517out_unregister:
518 while (i > 0) {
519 i--;
520 device_unregister(&css[i]->device);
521 }
454out_bus: 522out_bus:
455 bus_unregister(&css_bus_type); 523 bus_unregister(&css_bus_type);
456out: 524out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
481 549
482subsys_initcall(init_channel_subsystem); 550subsys_initcall(init_channel_subsystem);
483 551
484/*
485 * Register root devices for some drivers. The release function must not be
486 * in the device drivers, so we do it here.
487 */
488static void
489s390_root_dev_release(struct device *dev)
490{
491 kfree(dev);
492}
493
494struct device *
495s390_root_dev_register(const char *name)
496{
497 struct device *dev;
498 int ret;
499
500 if (!strlen(name))
501 return ERR_PTR(-EINVAL);
502 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
503 if (!dev)
504 return ERR_PTR(-ENOMEM);
505 memset(dev, 0, sizeof(struct device));
506 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
507 dev->release = s390_root_dev_release;
508 ret = device_register(dev);
509 if (ret) {
510 kfree(dev);
511 return ERR_PTR(ret);
512 }
513 return dev;
514}
515
516void
517s390_root_dev_unregister(struct device *dev)
518{
519 if (dev)
520 device_unregister(dev);
521}
522
523int 552int
524css_enqueue_subchannel_slow(unsigned long schid) 553css_enqueue_subchannel_slow(struct subchannel_id schid)
525{ 554{
526 struct slow_subchannel *new_slow_sch; 555 struct slow_subchannel *new_slow_sch;
527 unsigned long flags; 556 unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
564 593
565MODULE_LICENSE("GPL"); 594MODULE_LICENSE("GPL");
566EXPORT_SYMBOL(css_bus_type); 595EXPORT_SYMBOL(css_bus_type);
567EXPORT_SYMBOL(s390_root_dev_register);
568EXPORT_SYMBOL(s390_root_dev_unregister);
569EXPORT_SYMBOL_GPL(css_characteristics_avail); 596EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 2004a6c49388..251ebd7a7d3a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -6,6 +6,8 @@
6 6
7#include <asm/cio.h> 7#include <asm/cio.h>
8 8
9#include "schid.h"
10
9/* 11/*
10 * path grouping stuff 12 * path grouping stuff
11 */ 13 */
@@ -33,19 +35,25 @@ struct path_state {
33 __u8 resvd : 3; /* reserved */ 35 __u8 resvd : 3; /* reserved */
34} __attribute__ ((packed)); 36} __attribute__ ((packed));
35 37
38struct extended_cssid {
39 u8 version;
40 u8 cssid;
41} __attribute__ ((packed));
42
36struct pgid { 43struct pgid {
37 union { 44 union {
38 __u8 fc; /* SPID function code */ 45 __u8 fc; /* SPID function code */
39 struct path_state ps; /* SNID path state */ 46 struct path_state ps; /* SNID path state */
40 } inf; 47 } inf;
41 __u32 cpu_addr : 16; /* CPU address */ 48 union {
49 __u32 cpu_addr : 16; /* CPU address */
50 struct extended_cssid ext_cssid;
51 } pgid_high;
42 __u32 cpu_id : 24; /* CPU identification */ 52 __u32 cpu_id : 24; /* CPU identification */
43 __u32 cpu_model : 16; /* CPU model */ 53 __u32 cpu_model : 16; /* CPU model */
44 __u32 tod_high; /* high word TOD clock */ 54 __u32 tod_high; /* high word TOD clock */
45} __attribute__ ((packed)); 55} __attribute__ ((packed));
46 56
47extern struct pgid global_pgid;
48
49#define MAX_CIWS 8 57#define MAX_CIWS 8
50 58
51/* 59/*
@@ -68,7 +76,8 @@ struct ccw_device_private {
68 atomic_t onoff; 76 atomic_t onoff;
69 unsigned long registered; 77 unsigned long registered;
70 __u16 devno; /* device number */ 78 __u16 devno; /* device number */
71 __u16 irq; /* subchannel number */ 79 __u16 sch_no; /* subchannel number */
80 __u8 ssid; /* subchannel set id */
72 __u8 imask; /* lpm mask for SNID/SID/SPGID */ 81 __u8 imask; /* lpm mask for SNID/SID/SPGID */
73 int iretry; /* retry counter SNID/SID/SPGID */ 82 int iretry; /* retry counter SNID/SID/SPGID */
74 struct { 83 struct {
@@ -121,15 +130,27 @@ struct css_driver {
121extern struct bus_type css_bus_type; 130extern struct bus_type css_bus_type;
122extern struct css_driver io_subchannel_driver; 131extern struct css_driver io_subchannel_driver;
123 132
124int css_probe_device(int irq); 133extern int css_probe_device(struct subchannel_id);
125extern struct subchannel * get_subchannel_by_schid(int irq); 134extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
126extern unsigned int highest_subchannel;
127extern int css_init_done; 135extern int css_init_done;
128 136extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
129#define __MAX_SUBCHANNELS 65536 137
138#define __MAX_SUBCHANNEL 65535
139#define __MAX_SSID 3
140#define __MAX_CHPID 255
141#define __MAX_CSSID 0
142
143struct channel_subsystem {
144 u8 cssid;
145 int valid;
146 struct channel_path *chps[__MAX_CHPID];
147 struct device device;
148 struct pgid global_pgid;
149};
150#define to_css(dev) container_of(dev, struct channel_subsystem, device)
130 151
131extern struct bus_type css_bus_type; 152extern struct bus_type css_bus_type;
132extern struct device css_bus_device; 153extern struct channel_subsystem *css[];
133 154
134/* Some helper functions for disconnected state. */ 155/* Some helper functions for disconnected state. */
135int device_is_disconnected(struct subchannel *); 156int device_is_disconnected(struct subchannel *);
@@ -144,7 +165,7 @@ void device_set_waiting(struct subchannel *);
144void device_kill_pending_timer(struct subchannel *); 165void device_kill_pending_timer(struct subchannel *);
145 166
146/* Helper functions to build lists for the slow path. */ 167/* Helper functions to build lists for the slow path. */
147int css_enqueue_subchannel_slow(unsigned long schid); 168extern int css_enqueue_subchannel_slow(struct subchannel_id schid);
148void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); 169void css_walk_subchannel_slow_list(void (*fn)(unsigned long));
149void css_clear_subchannel_slow_list(void); 170void css_clear_subchannel_slow_list(void);
150int css_slow_subchannels_exist(void); 171int css_slow_subchannels_exist(void);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 811c9d150637..fa3e4c0a2536 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * $Revision: 1.131 $ 4 * $Revision: 1.137 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -59,7 +59,7 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
59 * Heavily modeled on pci and usb hotplug. 59 * Heavily modeled on pci and usb hotplug.
60 */ 60 */
61static int 61static int
62ccw_hotplug (struct device *dev, char **envp, int num_envp, 62ccw_uevent (struct device *dev, char **envp, int num_envp,
63 char *buffer, int buffer_size) 63 char *buffer, int buffer_size)
64{ 64{
65 struct ccw_device *cdev = to_ccwdev(dev); 65 struct ccw_device *cdev = to_ccwdev(dev);
@@ -110,7 +110,7 @@ ccw_hotplug (struct device *dev, char **envp, int num_envp,
110struct bus_type ccw_bus_type = { 110struct bus_type ccw_bus_type = {
111 .name = "ccw", 111 .name = "ccw",
112 .match = &ccw_bus_match, 112 .match = &ccw_bus_match,
113 .hotplug = &ccw_hotplug, 113 .uevent = &ccw_uevent,
114}; 114};
115 115
116static int io_subchannel_probe (struct device *); 116static int io_subchannel_probe (struct device *);
@@ -374,7 +374,7 @@ online_store (struct device *dev, struct device_attribute *attr, const char *buf
374 int i, force, ret; 374 int i, force, ret;
375 char *tmp; 375 char *tmp;
376 376
377 if (atomic_compare_and_swap(0, 1, &cdev->private->onoff)) 377 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
378 return -EAGAIN; 378 return -EAGAIN;
379 379
380 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 380 if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -535,7 +535,8 @@ ccw_device_register(struct ccw_device *cdev)
535} 535}
536 536
537struct match_data { 537struct match_data {
538 unsigned int devno; 538 unsigned int devno;
539 unsigned int ssid;
539 struct ccw_device * sibling; 540 struct ccw_device * sibling;
540}; 541};
541 542
@@ -548,6 +549,7 @@ match_devno(struct device * dev, void * data)
548 cdev = to_ccwdev(dev); 549 cdev = to_ccwdev(dev);
549 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 550 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
550 (cdev->private->devno == d->devno) && 551 (cdev->private->devno == d->devno) &&
552 (cdev->private->ssid == d->ssid) &&
551 (cdev != d->sibling)) { 553 (cdev != d->sibling)) {
552 cdev->private->state = DEV_STATE_NOT_OPER; 554 cdev->private->state = DEV_STATE_NOT_OPER;
553 return 1; 555 return 1;
@@ -556,11 +558,13 @@ match_devno(struct device * dev, void * data)
556} 558}
557 559
558static struct ccw_device * 560static struct ccw_device *
559get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) 561get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
562 struct ccw_device *sibling)
560{ 563{
561 struct device *dev; 564 struct device *dev;
562 struct match_data data = { 565 struct match_data data = {
563 .devno = devno, 566 .devno = devno,
567 .ssid = ssid,
564 .sibling = sibling, 568 .sibling = sibling,
565 }; 569 };
566 570
@@ -616,13 +620,13 @@ ccw_device_do_unreg_rereg(void *data)
616 620
617 need_rename = 1; 621 need_rename = 1;
618 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev, 622 other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
619 cdev); 623 sch->schid.ssid, cdev);
620 if (other_cdev) { 624 if (other_cdev) {
621 struct subchannel *other_sch; 625 struct subchannel *other_sch;
622 626
623 other_sch = to_subchannel(other_cdev->dev.parent); 627 other_sch = to_subchannel(other_cdev->dev.parent);
624 if (get_device(&other_sch->dev)) { 628 if (get_device(&other_sch->dev)) {
625 stsch(other_sch->irq, &other_sch->schib); 629 stsch(other_sch->schid, &other_sch->schib);
626 if (other_sch->schib.pmcw.dnv) { 630 if (other_sch->schib.pmcw.dnv) {
627 other_sch->schib.pmcw.intparm = 0; 631 other_sch->schib.pmcw.intparm = 0;
628 cio_modify(other_sch); 632 cio_modify(other_sch);
@@ -639,8 +643,8 @@ ccw_device_do_unreg_rereg(void *data)
639 if (test_and_clear_bit(1, &cdev->private->registered)) 643 if (test_and_clear_bit(1, &cdev->private->registered))
640 device_del(&cdev->dev); 644 device_del(&cdev->dev);
641 if (need_rename) 645 if (need_rename)
642 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 646 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
643 sch->schib.pmcw.dev); 647 sch->schid.ssid, sch->schib.pmcw.dev);
644 PREPARE_WORK(&cdev->private->kick_work, 648 PREPARE_WORK(&cdev->private->kick_work,
645 ccw_device_add_changed, (void *)cdev); 649 ccw_device_add_changed, (void *)cdev);
646 queue_work(ccw_device_work, &cdev->private->kick_work); 650 queue_work(ccw_device_work, &cdev->private->kick_work);
@@ -769,18 +773,20 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
769 sch->dev.driver_data = cdev; 773 sch->dev.driver_data = cdev;
770 sch->driver = &io_subchannel_driver; 774 sch->driver = &io_subchannel_driver;
771 cdev->ccwlock = &sch->lock; 775 cdev->ccwlock = &sch->lock;
776
772 /* Init private data. */ 777 /* Init private data. */
773 priv = cdev->private; 778 priv = cdev->private;
774 priv->devno = sch->schib.pmcw.dev; 779 priv->devno = sch->schib.pmcw.dev;
775 priv->irq = sch->irq; 780 priv->ssid = sch->schid.ssid;
781 priv->sch_no = sch->schid.sch_no;
776 priv->state = DEV_STATE_NOT_OPER; 782 priv->state = DEV_STATE_NOT_OPER;
777 INIT_LIST_HEAD(&priv->cmb_list); 783 INIT_LIST_HEAD(&priv->cmb_list);
778 init_waitqueue_head(&priv->wait_q); 784 init_waitqueue_head(&priv->wait_q);
779 init_timer(&priv->timer); 785 init_timer(&priv->timer);
780 786
781 /* Set an initial name for the device. */ 787 /* Set an initial name for the device. */
782 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", 788 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
783 sch->schib.pmcw.dev); 789 sch->schid.ssid, sch->schib.pmcw.dev);
784 790
785 /* Increase counter of devices currently in recognition. */ 791 /* Increase counter of devices currently in recognition. */
786 atomic_inc(&ccw_device_init_count); 792 atomic_inc(&ccw_device_init_count);
@@ -951,7 +957,7 @@ io_subchannel_shutdown(struct device *dev)
951 sch = to_subchannel(dev); 957 sch = to_subchannel(dev);
952 cdev = dev->driver_data; 958 cdev = dev->driver_data;
953 959
954 if (cio_is_console(sch->irq)) 960 if (cio_is_console(sch->schid))
955 return; 961 return;
956 if (!sch->schib.pmcw.ena) 962 if (!sch->schib.pmcw.ena)
957 /* Nothing to do. */ 963 /* Nothing to do. */
@@ -986,10 +992,6 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
986 cdev->dev = (struct device) { 992 cdev->dev = (struct device) {
987 .parent = &sch->dev, 993 .parent = &sch->dev,
988 }; 994 };
989 /* Initialize the subchannel structure */
990 sch->dev.parent = &css_bus_device;
991 sch->dev.bus = &css_bus_type;
992
993 rc = io_subchannel_recog(cdev, sch); 995 rc = io_subchannel_recog(cdev, sch);
994 if (rc) 996 if (rc)
995 return rc; 997 return rc;
@@ -1146,6 +1148,16 @@ ccw_driver_unregister (struct ccw_driver *cdriver)
1146 driver_unregister(&cdriver->driver); 1148 driver_unregister(&cdriver->driver);
1147} 1149}
1148 1150
1151/* Helper func for qdio. */
1152struct subchannel_id
1153ccw_device_get_subchannel_id(struct ccw_device *cdev)
1154{
1155 struct subchannel *sch;
1156
1157 sch = to_subchannel(cdev->dev.parent);
1158 return sch->schid;
1159}
1160
1149MODULE_LICENSE("GPL"); 1161MODULE_LICENSE("GPL");
1150EXPORT_SYMBOL(ccw_device_set_online); 1162EXPORT_SYMBOL(ccw_device_set_online);
1151EXPORT_SYMBOL(ccw_device_set_offline); 1163EXPORT_SYMBOL(ccw_device_set_offline);
@@ -1155,3 +1167,4 @@ EXPORT_SYMBOL(get_ccwdev_by_busid);
1155EXPORT_SYMBOL(ccw_bus_type); 1167EXPORT_SYMBOL(ccw_bus_type);
1156EXPORT_SYMBOL(ccw_device_work); 1168EXPORT_SYMBOL(ccw_device_work);
1157EXPORT_SYMBOL(ccw_device_notify_work); 1169EXPORT_SYMBOL(ccw_device_notify_work);
1170EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index a3aa056d7245..11587ebb7289 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -110,6 +110,7 @@ int ccw_device_stlck(struct ccw_device *);
110 110
111/* qdio needs this. */ 111/* qdio needs this. */
112void ccw_device_set_timeout(struct ccw_device *, int); 112void ccw_device_set_timeout(struct ccw_device *, int);
113extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
113 114
114void retry_set_schib(struct ccw_device *cdev); 115void retry_set_schib(struct ccw_device *cdev);
115#endif 116#endif
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c1c89f4fd4e3..23d12b65e5fa 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -133,7 +133,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
133 int ret; 133 int ret;
134 134
135 sch = to_subchannel(cdev->dev.parent); 135 sch = to_subchannel(cdev->dev.parent);
136 ret = stsch(sch->irq, &sch->schib); 136 ret = stsch(sch->schid, &sch->schib);
137 if (ret || !sch->schib.pmcw.dnv) 137 if (ret || !sch->schib.pmcw.dnv)
138 return -ENODEV; 138 return -ENODEV;
139 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) 139 if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
@@ -231,7 +231,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
231 * through ssch() and the path information is up to date. 231 * through ssch() and the path information is up to date.
232 */ 232 */
233 old_lpm = sch->lpm; 233 old_lpm = sch->lpm;
234 stsch(sch->irq, &sch->schib); 234 stsch(sch->schid, &sch->schib);
235 sch->lpm = sch->schib.pmcw.pim & 235 sch->lpm = sch->schib.pmcw.pim &
236 sch->schib.pmcw.pam & 236 sch->schib.pmcw.pam &
237 sch->schib.pmcw.pom & 237 sch->schib.pmcw.pom &
@@ -257,8 +257,9 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
257 switch (state) { 257 switch (state) {
258 case DEV_STATE_NOT_OPER: 258 case DEV_STATE_NOT_OPER:
259 CIO_DEBUG(KERN_WARNING, 2, 259 CIO_DEBUG(KERN_WARNING, 2,
260 "SenseID : unknown device %04x on subchannel %04x\n", 260 "SenseID : unknown device %04x on subchannel "
261 cdev->private->devno, sch->irq); 261 "0.%x.%04x\n", cdev->private->devno,
262 sch->schid.ssid, sch->schid.sch_no);
262 break; 263 break;
263 case DEV_STATE_OFFLINE: 264 case DEV_STATE_OFFLINE:
264 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) { 265 if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
@@ -282,16 +283,18 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
282 return; 283 return;
283 } 284 }
284 /* Issue device info message. */ 285 /* Issue device info message. */
285 CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: " 286 CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
286 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 287 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
287 "%04X/%02X\n", cdev->private->devno, 288 "%04X/%02X\n",
289 cdev->private->ssid, cdev->private->devno,
288 cdev->id.cu_type, cdev->id.cu_model, 290 cdev->id.cu_type, cdev->id.cu_model,
289 cdev->id.dev_type, cdev->id.dev_model); 291 cdev->id.dev_type, cdev->id.dev_model);
290 break; 292 break;
291 case DEV_STATE_BOXED: 293 case DEV_STATE_BOXED:
292 CIO_DEBUG(KERN_WARNING, 2, 294 CIO_DEBUG(KERN_WARNING, 2,
293 "SenseID : boxed device %04x on subchannel %04x\n", 295 "SenseID : boxed device %04x on subchannel "
294 cdev->private->devno, sch->irq); 296 "0.%x.%04x\n", cdev->private->devno,
297 sch->schid.ssid, sch->schid.sch_no);
295 break; 298 break;
296 } 299 }
297 cdev->private->state = state; 300 cdev->private->state = state;
@@ -359,7 +362,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
359 if (state == DEV_STATE_BOXED) 362 if (state == DEV_STATE_BOXED)
360 CIO_DEBUG(KERN_WARNING, 2, 363 CIO_DEBUG(KERN_WARNING, 2,
361 "Boxed device %04x on subchannel %04x\n", 364 "Boxed device %04x on subchannel %04x\n",
362 cdev->private->devno, sch->irq); 365 cdev->private->devno, sch->schid.sch_no);
363 366
364 if (cdev->private->flags.donotify) { 367 if (cdev->private->flags.donotify) {
365 cdev->private->flags.donotify = 0; 368 cdev->private->flags.donotify = 0;
@@ -592,7 +595,7 @@ ccw_device_offline(struct ccw_device *cdev)
592 struct subchannel *sch; 595 struct subchannel *sch;
593 596
594 sch = to_subchannel(cdev->dev.parent); 597 sch = to_subchannel(cdev->dev.parent);
595 if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv) 598 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
596 return -ENODEV; 599 return -ENODEV;
597 if (cdev->private->state != DEV_STATE_ONLINE) { 600 if (cdev->private->state != DEV_STATE_ONLINE) {
598 if (sch->schib.scsw.actl != 0) 601 if (sch->schib.scsw.actl != 0)
@@ -711,7 +714,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
711 * Since we might not just be coming from an interrupt from the 714 * Since we might not just be coming from an interrupt from the
712 * subchannel we have to update the schib. 715 * subchannel we have to update the schib.
713 */ 716 */
714 stsch(sch->irq, &sch->schib); 717 stsch(sch->schid, &sch->schib);
715 718
716 if (sch->schib.scsw.actl != 0 || 719 if (sch->schib.scsw.actl != 0 ||
717 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 720 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
@@ -923,7 +926,7 @@ ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
923 926
924 /* Iff device is idle, reset timeout. */ 927 /* Iff device is idle, reset timeout. */
925 sch = to_subchannel(cdev->dev.parent); 928 sch = to_subchannel(cdev->dev.parent);
926 if (!stsch(sch->irq, &sch->schib)) 929 if (!stsch(sch->schid, &sch->schib))
927 if (sch->schib.scsw.actl == 0) 930 if (sch->schib.scsw.actl == 0)
928 ccw_device_set_timeout(cdev, 0); 931 ccw_device_set_timeout(cdev, 0);
929 /* Call the handler. */ 932 /* Call the handler. */
@@ -1035,7 +1038,7 @@ device_trigger_reprobe(struct subchannel *sch)
1035 return; 1038 return;
1036 1039
1037 /* Update some values. */ 1040 /* Update some values. */
1038 if (stsch(sch->irq, &sch->schib)) 1041 if (stsch(sch->schid, &sch->schib))
1039 return; 1042 return;
1040 1043
1041 /* 1044 /*
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 0e68fb511dc9..04ceba343db8 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -27,7 +27,7 @@
27/* 27/*
28 * diag210 is used under VM to get information about a virtual device 28 * diag210 is used under VM to get information about a virtual device
29 */ 29 */
30#ifdef CONFIG_ARCH_S390X 30#ifdef CONFIG_64BIT
31int 31int
32diag210(struct diag210 * addr) 32diag210(struct diag210 * addr)
33{ 33{
@@ -256,16 +256,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
256 * sense id information. So, for intervention required, 256 * sense id information. So, for intervention required,
257 * we use the "whack it until it talks" strategy... 257 * we use the "whack it until it talks" strategy...
258 */ 258 */
259 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " 259 CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel "
260 "reports cmd reject\n", 260 "0.%x.%04x reports cmd reject\n",
261 cdev->private->devno, sch->irq); 261 cdev->private->devno, sch->schid.ssid,
262 sch->schid.sch_no);
262 return -EOPNOTSUPP; 263 return -EOPNOTSUPP;
263 } 264 }
264 if (irb->esw.esw0.erw.cons) { 265 if (irb->esw.esw0.erw.cons) {
265 CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, " 266 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, "
266 "lpum %02X, cnt %02d, sns :" 267 "lpum %02X, cnt %02d, sns :"
267 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 268 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
268 cdev->private->devno, 269 cdev->private->ssid, cdev->private->devno,
269 irb->esw.esw0.sublog.lpum, 270 irb->esw.esw0.sublog.lpum,
270 irb->esw.esw0.erw.scnt, 271 irb->esw.esw0.erw.scnt,
271 irb->ecw[0], irb->ecw[1], 272 irb->ecw[0], irb->ecw[1],
@@ -277,16 +278,17 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
277 if (irb->scsw.cc == 3) { 278 if (irb->scsw.cc == 3) {
278 if ((sch->orb.lpm & 279 if ((sch->orb.lpm &
279 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 280 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
280 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" 281 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
281 " subchannel %04x is 'not operational'\n", 282 "on subchannel 0.%x.%04x is "
282 sch->orb.lpm, cdev->private->devno, 283 "'not operational'\n", sch->orb.lpm,
283 sch->irq); 284 cdev->private->devno, sch->schid.ssid,
285 sch->schid.sch_no);
284 return -EACCES; 286 return -EACCES;
285 } 287 }
286 /* Hmm, whatever happened, try again. */ 288 /* Hmm, whatever happened, try again. */
287 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " 289 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
288 "subchannel %04x returns status %02X%02X\n", 290 "subchannel 0.%x.%04x returns status %02X%02X\n",
289 cdev->private->devno, sch->irq, 291 cdev->private->devno, sch->schid.ssid, sch->schid.sch_no,
290 irb->scsw.dstat, irb->scsw.cstat); 292 irb->scsw.dstat, irb->scsw.cstat);
291 return -EAGAIN; 293 return -EAGAIN;
292} 294}
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 85a3026e6900..143b6c25a4e6 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/s390/cio/device_ops.c 2 * drivers/s390/cio/device_ops.c
3 * 3 *
4 * $Revision: 1.57 $ 4 * $Revision: 1.58 $
5 * 5 *
6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation 7 * IBM Corporation
@@ -570,7 +570,7 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
570int 570int
571_ccw_device_get_subchannel_number(struct ccw_device *cdev) 571_ccw_device_get_subchannel_number(struct ccw_device *cdev)
572{ 572{
573 return cdev->private->irq; 573 return cdev->private->sch_no;
574} 574}
575 575
576int 576int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 0adac8a67331..052832d03d38 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
22#include "cio_debug.h" 22#include "cio_debug.h"
23#include "css.h" 23#include "css.h"
24#include "device.h" 24#include "device.h"
25#include "ioasm.h"
25 26
26/* 27/*
27 * Start Sense Path Group ID helper function. Used in ccw_device_recog 28 * Start Sense Path Group ID helper function. Used in ccw_device_recog
@@ -56,10 +57,10 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev)
56 if (ret != -EACCES) 57 if (ret != -EACCES)
57 return ret; 58 return ret;
58 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 59 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel "
59 "%04x, lpm %02X, became 'not " 60 "0.%x.%04x, lpm %02X, became 'not "
60 "operational'\n", 61 "operational'\n",
61 cdev->private->devno, sch->irq, 62 cdev->private->devno, sch->schid.ssid,
62 cdev->private->imask); 63 sch->schid.sch_no, cdev->private->imask);
63 64
64 } 65 }
65 cdev->private->imask >>= 1; 66 cdev->private->imask >>= 1;
@@ -105,10 +106,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
105 return -EOPNOTSUPP; 106 return -EOPNOTSUPP;
106 } 107 }
107 if (irb->esw.esw0.erw.cons) { 108 if (irb->esw.esw0.erw.cons) {
108 CIO_MSG_EVENT(2, "SNID - device %04x, unit check, " 109 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
109 "lpum %02X, cnt %02d, sns : " 110 "lpum %02X, cnt %02d, sns : "
110 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", 111 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
111 cdev->private->devno, 112 cdev->private->ssid, cdev->private->devno,
112 irb->esw.esw0.sublog.lpum, 113 irb->esw.esw0.sublog.lpum,
113 irb->esw.esw0.erw.scnt, 114 irb->esw.esw0.erw.scnt,
114 irb->ecw[0], irb->ecw[1], 115 irb->ecw[0], irb->ecw[1],
@@ -118,15 +119,17 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
118 return -EAGAIN; 119 return -EAGAIN;
119 } 120 }
120 if (irb->scsw.cc == 3) { 121 if (irb->scsw.cc == 3) {
121 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " 122 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
122 "%04x, lpm %02X, became 'not operational'\n", 123 " lpm %02X, became 'not operational'\n",
123 cdev->private->devno, sch->irq, sch->orb.lpm); 124 cdev->private->devno, sch->schid.ssid,
125 sch->schid.sch_no, sch->orb.lpm);
124 return -EACCES; 126 return -EACCES;
125 } 127 }
126 if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { 128 if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
127 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " 129 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
128 "is reserved by someone else\n", 130 "is reserved by someone else\n",
129 cdev->private->devno, sch->irq); 131 cdev->private->devno, sch->schid.ssid,
132 sch->schid.sch_no);
130 return -EUSERS; 133 return -EUSERS;
131 } 134 }
132 return 0; 135 return 0;
@@ -162,7 +165,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
162 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ 165 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
163 case 0: /* Sense Path Group ID successful. */ 166 case 0: /* Sense Path Group ID successful. */
164 if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) 167 if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET)
165 memcpy(&cdev->private->pgid, &global_pgid, 168 memcpy(&cdev->private->pgid, &css[0]->global_pgid,
166 sizeof(struct pgid)); 169 sizeof(struct pgid));
167 ccw_device_sense_pgid_done(cdev, 0); 170 ccw_device_sense_pgid_done(cdev, 0);
168 break; 171 break;
@@ -235,8 +238,9 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
235 sch->lpm &= ~cdev->private->imask; 238 sch->lpm &= ~cdev->private->imask;
236 sch->vpm &= ~cdev->private->imask; 239 sch->vpm &= ~cdev->private->imask;
237 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 240 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
238 "%04x, lpm %02X, became 'not operational'\n", 241 "0.%x.%04x, lpm %02X, became 'not operational'\n",
239 cdev->private->devno, sch->irq, cdev->private->imask); 242 cdev->private->devno, sch->schid.ssid,
243 sch->schid.sch_no, cdev->private->imask);
240 return ret; 244 return ret;
241} 245}
242 246
@@ -258,8 +262,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
258 if (irb->ecw[0] & SNS0_CMD_REJECT) 262 if (irb->ecw[0] & SNS0_CMD_REJECT)
259 return -EOPNOTSUPP; 263 return -EOPNOTSUPP;
260 /* Hmm, whatever happened, try again. */ 264 /* Hmm, whatever happened, try again. */
261 CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " 265 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, "
266 "cnt %02d, "
262 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 267 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
268 cdev->private->ssid,
263 cdev->private->devno, irb->esw.esw0.erw.scnt, 269 cdev->private->devno, irb->esw.esw0.erw.scnt,
264 irb->ecw[0], irb->ecw[1], 270 irb->ecw[0], irb->ecw[1],
265 irb->ecw[2], irb->ecw[3], 271 irb->ecw[2], irb->ecw[3],
@@ -268,10 +274,10 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
268 return -EAGAIN; 274 return -EAGAIN;
269 } 275 }
270 if (irb->scsw.cc == 3) { 276 if (irb->scsw.cc == 3) {
271 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " 277 CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel 0.%x.%04x,"
272 "%04x, lpm %02X, became 'not operational'\n", 278 " lpm %02X, became 'not operational'\n",
273 cdev->private->devno, sch->irq, 279 cdev->private->devno, sch->schid.ssid,
274 cdev->private->imask); 280 sch->schid.sch_no, cdev->private->imask);
275 return -EACCES; 281 return -EACCES;
276 } 282 }
277 return 0; 283 return 0;
@@ -364,8 +370,22 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
364void 370void
365ccw_device_verify_start(struct ccw_device *cdev) 371ccw_device_verify_start(struct ccw_device *cdev)
366{ 372{
373 struct subchannel *sch = to_subchannel(cdev->dev.parent);
374
367 cdev->private->flags.pgid_single = 0; 375 cdev->private->flags.pgid_single = 0;
368 cdev->private->iretry = 5; 376 cdev->private->iretry = 5;
377 /*
378 * Update sch->lpm with current values to catch paths becoming
379 * available again.
380 */
381 if (stsch(sch->schid, &sch->schib)) {
382 ccw_device_verify_done(cdev, -ENODEV);
383 return;
384 }
385 sch->lpm = sch->schib.pmcw.pim &
386 sch->schib.pmcw.pam &
387 sch->schib.pmcw.pom &
388 sch->opm;
369 __ccw_device_verify_start(cdev); 389 __ccw_device_verify_start(cdev);
370} 390}
371 391
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 12a24d4331a2..db09c209098b 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -36,15 +36,16 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
36 36
37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 37 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
38 "received" 38 "received"
39 " ... device %04X on subchannel %04X, dev_stat " 39 " ... device %04x on subchannel 0.%x.%04x, dev_stat "
40 ": %02X sch_stat : %02X\n", 40 ": %02X sch_stat : %02X\n",
41 cdev->private->devno, cdev->private->irq, 41 cdev->private->devno, cdev->private->ssid,
42 cdev->private->sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 43 irb->scsw.dstat, irb->scsw.cstat);
43 44
44 if (irb->scsw.cc != 3) { 45 if (irb->scsw.cc != 3) {
45 char dbf_text[15]; 46 char dbf_text[15];
46 47
47 sprintf(dbf_text, "chk%x", cdev->private->irq); 48 sprintf(dbf_text, "chk%x", cdev->private->sch_no);
48 CIO_TRACE_EVENT(0, dbf_text); 49 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb)); 50 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 } 51 }
@@ -59,10 +60,11 @@ ccw_device_path_notoper(struct ccw_device *cdev)
59 struct subchannel *sch; 60 struct subchannel *sch;
60 61
61 sch = to_subchannel(cdev->dev.parent); 62 sch = to_subchannel(cdev->dev.parent);
62 stsch (sch->irq, &sch->schib); 63 stsch (sch->schid, &sch->schib);
63 64
64 CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " 65 CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
65 "not operational \n", __FUNCTION__, sch->irq, 66 "not operational \n", __FUNCTION__,
67 sch->schid.ssid, sch->schid.sch_no,
66 sch->schib.pmcw.pnom); 68 sch->schib.pmcw.pnom);
67 69
68 sch->lpm &= ~sch->schib.pmcw.pnom; 70 sch->lpm &= ~sch->schib.pmcw.pnom;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 45480a2bc4c0..95a9462f9a91 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -1,12 +1,13 @@
1#ifndef S390_CIO_IOASM_H 1#ifndef S390_CIO_IOASM_H
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include "schid.h"
5
4/* 6/*
5 * TPI info structure 7 * TPI info structure
6 */ 8 */
7struct tpi_info { 9struct tpi_info {
8 __u32 reserved1 : 16; /* reserved 0x00000001 */ 10 struct subchannel_id schid;
9 __u32 irq : 16; /* aka. subchannel number */
10 __u32 intparm; /* interruption parameter */ 11 __u32 intparm; /* interruption parameter */
11 __u32 adapter_IO : 1; 12 __u32 adapter_IO : 1;
12 __u32 reserved2 : 1; 13 __u32 reserved2 : 1;
@@ -21,7 +22,8 @@ struct tpi_info {
21 * Some S390 specific IO instructions as inline 22 * Some S390 specific IO instructions as inline
22 */ 23 */
23 24
24static inline int stsch(int irq, volatile struct schib *addr) 25static inline int stsch(struct subchannel_id schid,
26 volatile struct schib *addr)
25{ 27{
26 int ccode; 28 int ccode;
27 29
@@ -31,12 +33,42 @@ static inline int stsch(int irq, volatile struct schib *addr)
31 " ipm %0\n" 33 " ipm %0\n"
32 " srl %0,28" 34 " srl %0,28"
33 : "=d" (ccode) 35 : "=d" (ccode)
34 : "d" (irq | 0x10000), "a" (addr) 36 : "d" (schid), "a" (addr), "m" (*addr)
37 : "cc", "1" );
38 return ccode;
39}
40
41static inline int stsch_err(struct subchannel_id schid,
42 volatile struct schib *addr)
43{
44 int ccode;
45
46 __asm__ __volatile__(
47 " lhi %0,%3\n"
48 " lr 1,%1\n"
49 " stsch 0(%2)\n"
50 "0: ipm %0\n"
51 " srl %0,28\n"
52 "1:\n"
53#ifdef CONFIG_64BIT
54 ".section __ex_table,\"a\"\n"
55 " .align 8\n"
56 " .quad 0b,1b\n"
57 ".previous"
58#else
59 ".section __ex_table,\"a\"\n"
60 " .align 4\n"
61 " .long 0b,1b\n"
62 ".previous"
63#endif
64 : "=&d" (ccode)
65 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
35 : "cc", "1" ); 66 : "cc", "1" );
36 return ccode; 67 return ccode;
37} 68}
38 69
39static inline int msch(int irq, volatile struct schib *addr) 70static inline int msch(struct subchannel_id schid,
71 volatile struct schib *addr)
40{ 72{
41 int ccode; 73 int ccode;
42 74
@@ -46,12 +78,13 @@ static inline int msch(int irq, volatile struct schib *addr)
46 " ipm %0\n" 78 " ipm %0\n"
47 " srl %0,28" 79 " srl %0,28"
48 : "=d" (ccode) 80 : "=d" (ccode)
49 : "d" (irq | 0x10000L), "a" (addr) 81 : "d" (schid), "a" (addr), "m" (*addr)
50 : "cc", "1" ); 82 : "cc", "1" );
51 return ccode; 83 return ccode;
52} 84}
53 85
54static inline int msch_err(int irq, volatile struct schib *addr) 86static inline int msch_err(struct subchannel_id schid,
87 volatile struct schib *addr)
55{ 88{
56 int ccode; 89 int ccode;
57 90
@@ -62,7 +95,7 @@ static inline int msch_err(int irq, volatile struct schib *addr)
62 "0: ipm %0\n" 95 "0: ipm %0\n"
63 " srl %0,28\n" 96 " srl %0,28\n"
64 "1:\n" 97 "1:\n"
65#ifdef CONFIG_ARCH_S390X 98#ifdef CONFIG_64BIT
66 ".section __ex_table,\"a\"\n" 99 ".section __ex_table,\"a\"\n"
67 " .align 8\n" 100 " .align 8\n"
68 " .quad 0b,1b\n" 101 " .quad 0b,1b\n"
@@ -74,12 +107,13 @@ static inline int msch_err(int irq, volatile struct schib *addr)
74 ".previous" 107 ".previous"
75#endif 108#endif
76 : "=&d" (ccode) 109 : "=&d" (ccode)
77 : "d" (irq | 0x10000L), "a" (addr), "K" (-EIO) 110 : "d" (schid), "a" (addr), "K" (-EIO), "m" (*addr)
78 : "cc", "1" ); 111 : "cc", "1" );
79 return ccode; 112 return ccode;
80} 113}
81 114
82static inline int tsch(int irq, volatile struct irb *addr) 115static inline int tsch(struct subchannel_id schid,
116 volatile struct irb *addr)
83{ 117{
84 int ccode; 118 int ccode;
85 119
@@ -89,7 +123,7 @@ static inline int tsch(int irq, volatile struct irb *addr)
89 " ipm %0\n" 123 " ipm %0\n"
90 " srl %0,28" 124 " srl %0,28"
91 : "=d" (ccode) 125 : "=d" (ccode)
92 : "d" (irq | 0x10000L), "a" (addr) 126 : "d" (schid), "a" (addr), "m" (*addr)
93 : "cc", "1" ); 127 : "cc", "1" );
94 return ccode; 128 return ccode;
95} 129}
@@ -103,12 +137,13 @@ static inline int tpi( volatile struct tpi_info *addr)
103 " ipm %0\n" 137 " ipm %0\n"
104 " srl %0,28" 138 " srl %0,28"
105 : "=d" (ccode) 139 : "=d" (ccode)
106 : "a" (addr) 140 : "a" (addr), "m" (*addr)
107 : "cc", "1" ); 141 : "cc", "1" );
108 return ccode; 142 return ccode;
109} 143}
110 144
111static inline int ssch(int irq, volatile struct orb *addr) 145static inline int ssch(struct subchannel_id schid,
146 volatile struct orb *addr)
112{ 147{
113 int ccode; 148 int ccode;
114 149
@@ -118,12 +153,12 @@ static inline int ssch(int irq, volatile struct orb *addr)
118 " ipm %0\n" 153 " ipm %0\n"
119 " srl %0,28" 154 " srl %0,28"
120 : "=d" (ccode) 155 : "=d" (ccode)
121 : "d" (irq | 0x10000L), "a" (addr) 156 : "d" (schid), "a" (addr), "m" (*addr)
122 : "cc", "1" ); 157 : "cc", "1" );
123 return ccode; 158 return ccode;
124} 159}
125 160
126static inline int rsch(int irq) 161static inline int rsch(struct subchannel_id schid)
127{ 162{
128 int ccode; 163 int ccode;
129 164
@@ -133,12 +168,12 @@ static inline int rsch(int irq)
133 " ipm %0\n" 168 " ipm %0\n"
134 " srl %0,28" 169 " srl %0,28"
135 : "=d" (ccode) 170 : "=d" (ccode)
136 : "d" (irq | 0x10000L) 171 : "d" (schid)
137 : "cc", "1" ); 172 : "cc", "1" );
138 return ccode; 173 return ccode;
139} 174}
140 175
141static inline int csch(int irq) 176static inline int csch(struct subchannel_id schid)
142{ 177{
143 int ccode; 178 int ccode;
144 179
@@ -148,12 +183,12 @@ static inline int csch(int irq)
148 " ipm %0\n" 183 " ipm %0\n"
149 " srl %0,28" 184 " srl %0,28"
150 : "=d" (ccode) 185 : "=d" (ccode)
151 : "d" (irq | 0x10000L) 186 : "d" (schid)
152 : "cc", "1" ); 187 : "cc", "1" );
153 return ccode; 188 return ccode;
154} 189}
155 190
156static inline int hsch(int irq) 191static inline int hsch(struct subchannel_id schid)
157{ 192{
158 int ccode; 193 int ccode;
159 194
@@ -163,12 +198,12 @@ static inline int hsch(int irq)
163 " ipm %0\n" 198 " ipm %0\n"
164 " srl %0,28" 199 " srl %0,28"
165 : "=d" (ccode) 200 : "=d" (ccode)
166 : "d" (irq | 0x10000L) 201 : "d" (schid)
167 : "cc", "1" ); 202 : "cc", "1" );
168 return ccode; 203 return ccode;
169} 204}
170 205
171static inline int xsch(int irq) 206static inline int xsch(struct subchannel_id schid)
172{ 207{
173 int ccode; 208 int ccode;
174 209
@@ -178,21 +213,22 @@ static inline int xsch(int irq)
178 " ipm %0\n" 213 " ipm %0\n"
179 " srl %0,28" 214 " srl %0,28"
180 : "=d" (ccode) 215 : "=d" (ccode)
181 : "d" (irq | 0x10000L) 216 : "d" (schid)
182 : "cc", "1" ); 217 : "cc", "1" );
183 return ccode; 218 return ccode;
184} 219}
185 220
186static inline int chsc(void *chsc_area) 221static inline int chsc(void *chsc_area)
187{ 222{
223 typedef struct { char _[4096]; } addr_type;
188 int cc; 224 int cc;
189 225
190 __asm__ __volatile__ ( 226 __asm__ __volatile__ (
191 ".insn rre,0xb25f0000,%1,0 \n\t" 227 ".insn rre,0xb25f0000,%2,0 \n\t"
192 "ipm %0 \n\t" 228 "ipm %0 \n\t"
193 "srl %0,28 \n\t" 229 "srl %0,28 \n\t"
194 : "=d" (cc) 230 : "=d" (cc), "=m" (*(addr_type *) chsc_area)
195 : "d" (chsc_area) 231 : "d" (chsc_area), "m" (*(addr_type *) chsc_area)
196 : "cc" ); 232 : "cc" );
197 233
198 return cc; 234 return cc;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index eb39218b925e..30a836ffc31f 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -56,7 +56,7 @@
56#include "ioasm.h" 56#include "ioasm.h"
57#include "chsc.h" 57#include "chsc.h"
58 58
59#define VERSION_QDIO_C "$Revision: 1.108 $" 59#define VERSION_QDIO_C "$Revision: 1.114 $"
60 60
61/****************** MODULE PARAMETER VARIABLES ********************/ 61/****************** MODULE PARAMETER VARIABLES ********************/
62MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); 62MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
@@ -76,6 +76,7 @@ static struct qdio_perf_stats perf_stats;
76#endif /* QDIO_PERFORMANCE_STATS */ 76#endif /* QDIO_PERFORMANCE_STATS */
77 77
78static int hydra_thinints; 78static int hydra_thinints;
79static int is_passthrough = 0;
79static int omit_svs; 80static int omit_svs;
80 81
81static int indicator_used[INDICATORS_PER_CACHELINE]; 82static int indicator_used[INDICATORS_PER_CACHELINE];
@@ -136,12 +137,126 @@ qdio_release_q(struct qdio_q *q)
136 atomic_dec(&q->use_count); 137 atomic_dec(&q->use_count);
137} 138}
138 139
139static volatile inline void 140/*check ccq */
140qdio_set_slsb(volatile char *slsb, unsigned char value) 141static inline int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{
144 char dbf_text[15];
145
146 if (ccq == 0 || ccq == 32 || ccq == 96)
147 return 0;
148 if (ccq == 97)
149 return 1;
150 /*notify devices immediately*/
151 sprintf(dbf_text,"%d", ccq);
152 QDIO_DBF_TEXT2(1,trace,dbf_text);
153 return -EIO;
154}
155/* EQBS: extract buffer states */
156static inline int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt)
159{
160 struct qdio_irq *irq;
161 unsigned int tmp_cnt, q_no, ccq;
162 int rc ;
163 char dbf_text[15];
164
165 ccq = 0;
166 tmp_cnt = *cnt;
167 irq = (struct qdio_irq*)q->irq_ptr;
168 q_no = q->q_no;
169 if(!q->is_input_q)
170 q_no += irq->no_input_qs;
171 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
172 rc = qdio_check_ccq(q, ccq);
173 if (rc < 0) {
174 QDIO_DBF_TEXT2(1,trace,"eqberr");
175 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
176 QDIO_DBF_TEXT2(1,trace,dbf_text);
177 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
178 QDIO_STATUS_LOOK_FOR_ERROR,
179 0, 0, 0, -1, -1, q->int_parm);
180 return 0;
181 }
182 return (tmp_cnt - *cnt);
183}
184
185/* SQBS: set buffer states */
186static inline int
187qdio_do_sqbs(struct qdio_q *q, unsigned char state,
188 unsigned int *start, unsigned int *cnt)
141{ 189{
142 xchg((char*)slsb,value); 190 struct qdio_irq *irq;
191 unsigned int tmp_cnt, q_no, ccq;
192 int rc;
193 char dbf_text[15];
194
195 ccq = 0;
196 tmp_cnt = *cnt;
197 irq = (struct qdio_irq*)q->irq_ptr;
198 q_no = q->q_no;
199 if(!q->is_input_q)
200 q_no += irq->no_input_qs;
201 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
202 rc = qdio_check_ccq(q, ccq);
203 if (rc < 0) {
204 QDIO_DBF_TEXT3(1,trace,"sqberr");
205 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
206 QDIO_DBF_TEXT3(1,trace,dbf_text);
207 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
208 QDIO_STATUS_LOOK_FOR_ERROR,
209 0, 0, 0, -1, -1, q->int_parm);
210 return 0;
211 }
212 return (tmp_cnt - *cnt);
143} 213}
144 214
215static inline int
216qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
217 unsigned char state, unsigned int *count)
218{
219 volatile char *slsb;
220 struct qdio_irq *irq;
221
222 irq = (struct qdio_irq*)q->irq_ptr;
223 if (!irq->is_qebsm) {
224 slsb = (char *)&q->slsb.acc.val[(*bufno)];
225 xchg(slsb, state);
226 return 1;
227 }
228 return qdio_do_sqbs(q, state, bufno, count);
229}
230
231#ifdef CONFIG_QDIO_DEBUG
232static inline void
233qdio_trace_slsb(struct qdio_q *q)
234{
235 if (q->queue_type==QDIO_TRACE_QTYPE) {
236 if (q->is_input_q)
237 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
238 QDIO_MAX_BUFFERS_PER_Q);
239 else
240 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
241 QDIO_MAX_BUFFERS_PER_Q);
242 }
243}
244#endif
245
246static inline int
247set_slsb(struct qdio_q *q, unsigned int *bufno,
248 unsigned char state, unsigned int *count)
249{
250 int rc;
251#ifdef CONFIG_QDIO_DEBUG
252 qdio_trace_slsb(q);
253#endif
254 rc = qdio_set_slsb(q, bufno, state, count);
255#ifdef CONFIG_QDIO_DEBUG
256 qdio_trace_slsb(q);
257#endif
258 return rc;
259}
145static inline int 260static inline int
146qdio_siga_sync(struct qdio_q *q, unsigned int gpr2, 261qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
147 unsigned int gpr3) 262 unsigned int gpr3)
@@ -155,7 +270,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
155 perf_stats.siga_syncs++; 270 perf_stats.siga_syncs++;
156#endif /* QDIO_PERFORMANCE_STATS */ 271#endif /* QDIO_PERFORMANCE_STATS */
157 272
158 cc = do_siga_sync(q->irq, gpr2, gpr3); 273 cc = do_siga_sync(q->schid, gpr2, gpr3);
159 if (cc) 274 if (cc)
160 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); 275 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
161 276
@@ -170,6 +285,23 @@ qdio_siga_sync_q(struct qdio_q *q)
170 return qdio_siga_sync(q, q->mask, 0); 285 return qdio_siga_sync(q, q->mask, 0);
171} 286}
172 287
288static int
289__do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
290{
291 struct qdio_irq *irq;
292 unsigned int fc = 0;
293 unsigned long schid;
294
295 irq = (struct qdio_irq *) q->irq_ptr;
296 if (!irq->is_qebsm)
297 schid = *((u32 *)&q->schid);
298 else {
299 schid = irq->sch_token;
300 fc |= 0x80;
301 }
302 return do_siga_output(schid, q->mask, busy_bit, fc);
303}
304
173/* 305/*
174 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 306 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
175 * an access exception 307 * an access exception
@@ -189,7 +321,7 @@ qdio_siga_output(struct qdio_q *q)
189 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 321 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
190 322
191 for (;;) { 323 for (;;) {
192 cc = do_siga_output(q->irq, q->mask, &busy_bit); 324 cc = __do_siga_output(q, &busy_bit);
193//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit); 325//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
194 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) { 326 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
195 if (!start_time) 327 if (!start_time)
@@ -221,7 +353,7 @@ qdio_siga_input(struct qdio_q *q)
221 perf_stats.siga_ins++; 353 perf_stats.siga_ins++;
222#endif /* QDIO_PERFORMANCE_STATS */ 354#endif /* QDIO_PERFORMANCE_STATS */
223 355
224 cc = do_siga_input(q->irq, q->mask); 356 cc = do_siga_input(q->schid, q->mask);
225 357
226 if (cc) 358 if (cc)
227 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*)); 359 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
@@ -230,7 +362,7 @@ qdio_siga_input(struct qdio_q *q)
230} 362}
231 363
232/* locked by the locks in qdio_activate and qdio_cleanup */ 364/* locked by the locks in qdio_activate and qdio_cleanup */
233static __u32 volatile * 365static __u32 *
234qdio_get_indicator(void) 366qdio_get_indicator(void)
235{ 367{
236 int i; 368 int i;
@@ -258,7 +390,7 @@ qdio_put_indicator(__u32 *addr)
258 atomic_dec(&spare_indicator_usecount); 390 atomic_dec(&spare_indicator_usecount);
259} 391}
260 392
261static inline volatile void 393static inline void
262tiqdio_clear_summary_bit(__u32 *location) 394tiqdio_clear_summary_bit(__u32 *location)
263{ 395{
264 QDIO_DBF_TEXT5(0,trace,"clrsummb"); 396 QDIO_DBF_TEXT5(0,trace,"clrsummb");
@@ -267,7 +399,7 @@ tiqdio_clear_summary_bit(__u32 *location)
267 xchg(location,0); 399 xchg(location,0);
268} 400}
269 401
270static inline volatile void 402static inline void
271tiqdio_set_summary_bit(__u32 *location) 403tiqdio_set_summary_bit(__u32 *location)
272{ 404{
273 QDIO_DBF_TEXT5(0,trace,"setsummb"); 405 QDIO_DBF_TEXT5(0,trace,"setsummb");
@@ -336,7 +468,9 @@ static inline int
336qdio_stop_polling(struct qdio_q *q) 468qdio_stop_polling(struct qdio_q *q)
337{ 469{
338#ifdef QDIO_USE_PROCESSING_STATE 470#ifdef QDIO_USE_PROCESSING_STATE
339 int gsf; 471 unsigned int tmp, gsf, count = 1;
472 unsigned char state = 0;
473 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
340 474
341 if (!atomic_swap(&q->polling,0)) 475 if (!atomic_swap(&q->polling,0))
342 return 1; 476 return 1;
@@ -348,17 +482,22 @@ qdio_stop_polling(struct qdio_q *q)
348 if (!q->is_input_q) 482 if (!q->is_input_q)
349 return 1; 483 return 1;
350 484
351 gsf=GET_SAVED_FRONTIER(q); 485 tmp = gsf = GET_SAVED_FRONTIER(q);
352 set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)& 486 tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
353 (QDIO_MAX_BUFFERS_PER_Q-1)], 487 set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
354 SLSB_P_INPUT_NOT_INIT); 488
355 /* 489 /*
356 * we don't issue this SYNC_MEMORY, as we trust Rick T and 490 * we don't issue this SYNC_MEMORY, as we trust Rick T and
357 * moreover will not use the PROCESSING state under VM, so 491 * moreover will not use the PROCESSING state under VM, so
358 * q->polling was 0 anyway 492 * q->polling was 0 anyway
359 */ 493 */
360 /*SYNC_MEMORY;*/ 494 /*SYNC_MEMORY;*/
361 if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED) 495 if (irq->is_qebsm) {
496 count = 1;
497 qdio_do_eqbs(q, &state, &gsf, &count);
498 } else
499 state = q->slsb.acc.val[gsf];
500 if (state != SLSB_P_INPUT_PRIMED)
362 return 1; 501 return 1;
363 /* 502 /*
364 * set our summary bit again, as otherwise there is a 503 * set our summary bit again, as otherwise there is a
@@ -431,18 +570,136 @@ tiqdio_clear_global_summary(void)
431 570
432 571
433/************************* OUTBOUND ROUTINES *******************************/ 572/************************* OUTBOUND ROUTINES *******************************/
573static int
574qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
575{
576 struct qdio_irq *irq;
577 unsigned char state;
578 unsigned int cnt, count, ftc;
579
580 irq = (struct qdio_irq *) q->irq_ptr;
581 if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
582 SYNC_MEMORY;
583
584 ftc = q->first_to_check;
585 count = qdio_min(atomic_read(&q->number_of_buffers_used),
586 (QDIO_MAX_BUFFERS_PER_Q-1));
587 if (count == 0)
588 return q->first_to_check;
589 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
590 if (cnt == 0)
591 return q->first_to_check;
592 switch (state) {
593 case SLSB_P_OUTPUT_ERROR:
594 QDIO_DBF_TEXT3(0,trace,"outperr");
595 atomic_sub(cnt , &q->number_of_buffers_used);
596 if (q->qdio_error)
597 q->error_status_flags |=
598 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
599 q->qdio_error = SLSB_P_OUTPUT_ERROR;
600 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
601 q->first_to_check = ftc;
602 break;
603 case SLSB_P_OUTPUT_EMPTY:
604 QDIO_DBF_TEXT5(0,trace,"outpempt");
605 atomic_sub(cnt, &q->number_of_buffers_used);
606 q->first_to_check = ftc;
607 break;
608 case SLSB_CU_OUTPUT_PRIMED:
609 /* all buffers primed */
610 QDIO_DBF_TEXT5(0,trace,"outpprim");
611 break;
612 default:
613 break;
614 }
615 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
616 return q->first_to_check;
617}
618
619static int
620qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
621{
622 struct qdio_irq *irq;
623 unsigned char state;
624 int tmp, ftc, count, cnt;
625 char dbf_text[15];
626
627
628 irq = (struct qdio_irq *) q->irq_ptr;
629 ftc = q->first_to_check;
630 count = qdio_min(atomic_read(&q->number_of_buffers_used),
631 (QDIO_MAX_BUFFERS_PER_Q-1));
632 if (count == 0)
633 return q->first_to_check;
634 cnt = qdio_do_eqbs(q, &state, &ftc, &count);
635 if (cnt == 0)
636 return q->first_to_check;
637 switch (state) {
638 case SLSB_P_INPUT_ERROR :
639#ifdef CONFIG_QDIO_DEBUG
640 QDIO_DBF_TEXT3(1,trace,"inperr");
641 sprintf(dbf_text,"%2x,%2x",ftc,count);
642 QDIO_DBF_TEXT3(1,trace,dbf_text);
643#endif /* CONFIG_QDIO_DEBUG */
644 if (q->qdio_error)
645 q->error_status_flags |=
646 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
647 q->qdio_error = SLSB_P_INPUT_ERROR;
648 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
649 atomic_sub(cnt, &q->number_of_buffers_used);
650 q->first_to_check = ftc;
651 break;
652 case SLSB_P_INPUT_PRIMED :
653 QDIO_DBF_TEXT3(0,trace,"inptprim");
654 sprintf(dbf_text,"%2x,%2x",ftc,count);
655 QDIO_DBF_TEXT3(1,trace,dbf_text);
656 tmp = 0;
657 ftc = q->first_to_check;
658#ifdef QDIO_USE_PROCESSING_STATE
659 if (cnt > 1) {
660 cnt -= 1;
661 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
662 if (!tmp)
663 break;
664 }
665 cnt = 1;
666 tmp += set_slsb(q, &ftc,
667 SLSB_P_INPUT_PROCESSING, &cnt);
668 atomic_set(&q->polling, 1);
669#else
670 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
671#endif
672 atomic_sub(tmp, &q->number_of_buffers_used);
673 q->first_to_check = ftc;
674 break;
675 case SLSB_CU_INPUT_EMPTY:
676 case SLSB_P_INPUT_NOT_INIT:
677 case SLSB_P_INPUT_PROCESSING:
678 QDIO_DBF_TEXT5(0,trace,"inpnipro");
679 break;
680 default:
681 break;
682 }
683 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
684 return q->first_to_check;
685}
434 686
435static inline int 687static inline int
436qdio_get_outbound_buffer_frontier(struct qdio_q *q) 688qdio_get_outbound_buffer_frontier(struct qdio_q *q)
437{ 689{
438 int f,f_mod_no; 690 struct qdio_irq *irq;
439 volatile char *slsb; 691 volatile char *slsb;
440 int first_not_to_check; 692 unsigned int count = 1;
693 int first_not_to_check, f, f_mod_no;
441 char dbf_text[15]; 694 char dbf_text[15];
442 695
443 QDIO_DBF_TEXT4(0,trace,"getobfro"); 696 QDIO_DBF_TEXT4(0,trace,"getobfro");
444 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 697 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
445 698
699 irq = (struct qdio_irq *) q->irq_ptr;
700 if (irq->is_qebsm)
701 return qdio_qebsm_get_outbound_buffer_frontier(q);
702
446 slsb=&q->slsb.acc.val[0]; 703 slsb=&q->slsb.acc.val[0];
447 f_mod_no=f=q->first_to_check; 704 f_mod_no=f=q->first_to_check;
448 /* 705 /*
@@ -484,7 +741,7 @@ check_next:
484 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); 741 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
485 742
486 /* kind of process the buffer */ 743 /* kind of process the buffer */
487 set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT); 744 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
488 745
489 /* 746 /*
490 * we increment the frontier, as this buffer 747 * we increment the frontier, as this buffer
@@ -597,48 +854,48 @@ qdio_kick_outbound_q(struct qdio_q *q)
597 854
598 result=qdio_siga_output(q); 855 result=qdio_siga_output(q);
599 856
600 switch (result) { 857 switch (result) {
601 case 0: 858 case 0:
602 /* went smooth this time, reset timestamp */ 859 /* went smooth this time, reset timestamp */
603#ifdef CONFIG_QDIO_DEBUG 860#ifdef CONFIG_QDIO_DEBUG
604 QDIO_DBF_TEXT3(0,trace,"cc2reslv"); 861 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
605 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, 862 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
606 atomic_read(&q->busy_siga_counter)); 863 atomic_read(&q->busy_siga_counter));
607 QDIO_DBF_TEXT3(0,trace,dbf_text); 864 QDIO_DBF_TEXT3(0,trace,dbf_text);
608#endif /* CONFIG_QDIO_DEBUG */ 865#endif /* CONFIG_QDIO_DEBUG */
609 q->timing.busy_start=0; 866 q->timing.busy_start=0;
867 break;
868 case (2|QDIO_SIGA_ERROR_B_BIT_SET):
869 /* cc=2 and busy bit: */
870 atomic_inc(&q->busy_siga_counter);
871
872 /* if the last siga was successful, save
873 * timestamp here */
874 if (!q->timing.busy_start)
875 q->timing.busy_start=NOW;
876
877 /* if we're in time, don't touch error_status_flags
878 * and siga_error */
879 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
880 qdio_mark_q(q);
610 break; 881 break;
611 case (2|QDIO_SIGA_ERROR_B_BIT_SET): 882 }
612 /* cc=2 and busy bit: */ 883 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
613 atomic_inc(&q->busy_siga_counter);
614
615 /* if the last siga was successful, save
616 * timestamp here */
617 if (!q->timing.busy_start)
618 q->timing.busy_start=NOW;
619
620 /* if we're in time, don't touch error_status_flags
621 * and siga_error */
622 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
623 qdio_mark_q(q);
624 break;
625 }
626 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
627#ifdef CONFIG_QDIO_DEBUG 884#ifdef CONFIG_QDIO_DEBUG
628 sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no, 885 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
629 atomic_read(&q->busy_siga_counter)); 886 atomic_read(&q->busy_siga_counter));
630 QDIO_DBF_TEXT3(0,trace,dbf_text); 887 QDIO_DBF_TEXT3(0,trace,dbf_text);
631#endif /* CONFIG_QDIO_DEBUG */ 888#endif /* CONFIG_QDIO_DEBUG */
632 /* else fallthrough and report error */ 889 /* else fallthrough and report error */
633 default: 890 default:
634 /* for plain cc=1, 2 or 3: */ 891 /* for plain cc=1, 2 or 3: */
635 if (q->siga_error) 892 if (q->siga_error)
636 q->error_status_flags|=
637 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
638 q->error_status_flags|= 893 q->error_status_flags|=
639 QDIO_STATUS_LOOK_FOR_ERROR; 894 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
640 q->siga_error=result; 895 q->error_status_flags|=
641 } 896 QDIO_STATUS_LOOK_FOR_ERROR;
897 q->siga_error=result;
898 }
642} 899}
643 900
644static inline void 901static inline void
@@ -743,8 +1000,10 @@ qdio_outbound_processing(struct qdio_q *q)
743static inline int 1000static inline int
744qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1001qdio_get_inbound_buffer_frontier(struct qdio_q *q)
745{ 1002{
1003 struct qdio_irq *irq;
746 int f,f_mod_no; 1004 int f,f_mod_no;
747 volatile char *slsb; 1005 volatile char *slsb;
1006 unsigned int count = 1;
748 int first_not_to_check; 1007 int first_not_to_check;
749#ifdef CONFIG_QDIO_DEBUG 1008#ifdef CONFIG_QDIO_DEBUG
750 char dbf_text[15]; 1009 char dbf_text[15];
@@ -756,6 +1015,10 @@ qdio_get_inbound_buffer_frontier(struct qdio_q *q)
756 QDIO_DBF_TEXT4(0,trace,"getibfro"); 1015 QDIO_DBF_TEXT4(0,trace,"getibfro");
757 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 1016 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
758 1017
1018 irq = (struct qdio_irq *) q->irq_ptr;
1019 if (irq->is_qebsm)
1020 return qdio_qebsm_get_inbound_buffer_frontier(q);
1021
759 slsb=&q->slsb.acc.val[0]; 1022 slsb=&q->slsb.acc.val[0];
760 f_mod_no=f=q->first_to_check; 1023 f_mod_no=f=q->first_to_check;
761 /* 1024 /*
@@ -792,19 +1055,19 @@ check_next:
792 * kill VM in terms of CP overhead 1055 * kill VM in terms of CP overhead
793 */ 1056 */
794 if (q->siga_sync) { 1057 if (q->siga_sync) {
795 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1058 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
796 } else { 1059 } else {
797 /* set the previous buffer to NOT_INIT. The current 1060 /* set the previous buffer to NOT_INIT. The current
798 * buffer will be set to PROCESSING at the end of 1061 * buffer will be set to PROCESSING at the end of
799 * this function to avoid further interrupts. */ 1062 * this function to avoid further interrupts. */
800 if (last_position>=0) 1063 if (last_position>=0)
801 set_slsb(&slsb[last_position], 1064 set_slsb(q, &last_position,
802 SLSB_P_INPUT_NOT_INIT); 1065 SLSB_P_INPUT_NOT_INIT, &count);
803 atomic_set(&q->polling,1); 1066 atomic_set(&q->polling,1);
804 last_position=f_mod_no; 1067 last_position=f_mod_no;
805 } 1068 }
806#else /* QDIO_USE_PROCESSING_STATE */ 1069#else /* QDIO_USE_PROCESSING_STATE */
807 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1070 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
808#endif /* QDIO_USE_PROCESSING_STATE */ 1071#endif /* QDIO_USE_PROCESSING_STATE */
809 /* 1072 /*
810 * not needed, as the inbound queue will be synced on the next 1073 * not needed, as the inbound queue will be synced on the next
@@ -829,7 +1092,7 @@ check_next:
829 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256); 1092 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
830 1093
831 /* kind of process the buffer */ 1094 /* kind of process the buffer */
832 set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT); 1095 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
833 1096
834 if (q->qdio_error) 1097 if (q->qdio_error)
835 q->error_status_flags|= 1098 q->error_status_flags|=
@@ -857,7 +1120,7 @@ out:
857 1120
858#ifdef QDIO_USE_PROCESSING_STATE 1121#ifdef QDIO_USE_PROCESSING_STATE
859 if (last_position>=0) 1122 if (last_position>=0)
860 set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING); 1123 set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
861#endif /* QDIO_USE_PROCESSING_STATE */ 1124#endif /* QDIO_USE_PROCESSING_STATE */
862 1125
863 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); 1126 QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
@@ -902,6 +1165,10 @@ static inline int
902tiqdio_is_inbound_q_done(struct qdio_q *q) 1165tiqdio_is_inbound_q_done(struct qdio_q *q)
903{ 1166{
904 int no_used; 1167 int no_used;
1168 unsigned int start_buf, count;
1169 unsigned char state = 0;
1170 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1171
905#ifdef CONFIG_QDIO_DEBUG 1172#ifdef CONFIG_QDIO_DEBUG
906 char dbf_text[15]; 1173 char dbf_text[15];
907#endif 1174#endif
@@ -927,8 +1194,13 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
927 if (!q->siga_sync) 1194 if (!q->siga_sync)
928 /* we'll check for more primed buffers in qeth_stop_polling */ 1195 /* we'll check for more primed buffers in qeth_stop_polling */
929 return 0; 1196 return 0;
930 1197 if (irq->is_qebsm) {
931 if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED) 1198 count = 1;
1199 start_buf = q->first_to_check;
1200 qdio_do_eqbs(q, &state, &start_buf, &count);
1201 } else
1202 state = q->slsb.acc.val[q->first_to_check];
1203 if (state != SLSB_P_INPUT_PRIMED)
932 /* 1204 /*
933 * nothing more to do, if next buffer is not PRIMED. 1205 * nothing more to do, if next buffer is not PRIMED.
934 * note that we did a SYNC_MEMORY before, that there 1206 * note that we did a SYNC_MEMORY before, that there
@@ -955,6 +1227,10 @@ static inline int
955qdio_is_inbound_q_done(struct qdio_q *q) 1227qdio_is_inbound_q_done(struct qdio_q *q)
956{ 1228{
957 int no_used; 1229 int no_used;
1230 unsigned int start_buf, count;
1231 unsigned char state = 0;
1232 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1233
958#ifdef CONFIG_QDIO_DEBUG 1234#ifdef CONFIG_QDIO_DEBUG
959 char dbf_text[15]; 1235 char dbf_text[15];
960#endif 1236#endif
@@ -973,8 +1249,13 @@ qdio_is_inbound_q_done(struct qdio_q *q)
973 QDIO_DBF_TEXT4(0,trace,dbf_text); 1249 QDIO_DBF_TEXT4(0,trace,dbf_text);
974 return 1; 1250 return 1;
975 } 1251 }
976 1252 if (irq->is_qebsm) {
977 if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { 1253 count = 1;
1254 start_buf = q->first_to_check;
1255 qdio_do_eqbs(q, &state, &start_buf, &count);
1256 } else
1257 state = q->slsb.acc.val[q->first_to_check];
1258 if (state == SLSB_P_INPUT_PRIMED) {
978 /* we got something to do */ 1259 /* we got something to do */
979 QDIO_DBF_TEXT4(0,trace,"inqisntA"); 1260 QDIO_DBF_TEXT4(0,trace,"inqisntA");
980 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 1261 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -1456,7 +1737,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1456 void *ptr; 1737 void *ptr;
1457 int available; 1738 int available;
1458 1739
1459 sprintf(dbf_text,"qfqs%4x",cdev->private->irq); 1740 sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
1460 QDIO_DBF_TEXT0(0,setup,dbf_text); 1741 QDIO_DBF_TEXT0(0,setup,dbf_text);
1461 for (i=0;i<no_input_qs;i++) { 1742 for (i=0;i<no_input_qs;i++) {
1462 q=irq_ptr->input_qs[i]; 1743 q=irq_ptr->input_qs[i];
@@ -1476,7 +1757,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1476 1757
1477 q->queue_type=q_format; 1758 q->queue_type=q_format;
1478 q->int_parm=int_parm; 1759 q->int_parm=int_parm;
1479 q->irq=irq_ptr->irq; 1760 q->schid = irq_ptr->schid;
1480 q->irq_ptr = irq_ptr; 1761 q->irq_ptr = irq_ptr;
1481 q->cdev = cdev; 1762 q->cdev = cdev;
1482 q->mask=1<<(31-i); 1763 q->mask=1<<(31-i);
@@ -1523,11 +1804,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1523 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); 1804 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1524 1805
1525 /* fill in slsb */ 1806 /* fill in slsb */
1526 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { 1807 if (!irq_ptr->is_qebsm) {
1527 set_slsb(&q->slsb.acc.val[j], 1808 unsigned int count = 1;
1528 SLSB_P_INPUT_NOT_INIT); 1809 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1529/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ 1810 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1530 } 1811 }
1531 } 1812 }
1532 1813
1533 for (i=0;i<no_output_qs;i++) { 1814 for (i=0;i<no_output_qs;i++) {
@@ -1549,7 +1830,7 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1549 q->queue_type=q_format; 1830 q->queue_type=q_format;
1550 q->int_parm=int_parm; 1831 q->int_parm=int_parm;
1551 q->is_input_q=0; 1832 q->is_input_q=0;
1552 q->irq=irq_ptr->irq; 1833 q->schid = irq_ptr->schid;
1553 q->cdev = cdev; 1834 q->cdev = cdev;
1554 q->irq_ptr = irq_ptr; 1835 q->irq_ptr = irq_ptr;
1555 q->mask=1<<(31-i); 1836 q->mask=1<<(31-i);
@@ -1584,11 +1865,11 @@ qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1584 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*)); 1865 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1585 1866
1586 /* fill in slsb */ 1867 /* fill in slsb */
1587 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) { 1868 if (!irq_ptr->is_qebsm) {
1588 set_slsb(&q->slsb.acc.val[j], 1869 unsigned int count = 1;
1589 SLSB_P_OUTPUT_NOT_INIT); 1870 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1590/* q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/ 1871 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1591 } 1872 }
1592 } 1873 }
1593} 1874}
1594 1875
@@ -1656,7 +1937,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1656 char dbf_text[15]; 1937 char dbf_text[15];
1657 1938
1658 QDIO_DBF_TEXT5(0,trace,"newstate"); 1939 QDIO_DBF_TEXT5(0,trace,"newstate");
1659 sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); 1940 sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1660 QDIO_DBF_TEXT5(0,trace,dbf_text); 1941 QDIO_DBF_TEXT5(0,trace,dbf_text);
1661#endif /* CONFIG_QDIO_DEBUG */ 1942#endif /* CONFIG_QDIO_DEBUG */
1662 1943
@@ -1669,12 +1950,12 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1669} 1950}
1670 1951
1671static inline void 1952static inline void
1672qdio_irq_check_sense(int irq, struct irb *irb) 1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1673{ 1954{
1674 char dbf_text[15]; 1955 char dbf_text[15];
1675 1956
1676 if (irb->esw.esw0.erw.cons) { 1957 if (irb->esw.esw0.erw.cons) {
1677 sprintf(dbf_text,"sens%4x",irq); 1958 sprintf(dbf_text,"sens%4x",schid.sch_no);
1678 QDIO_DBF_TEXT2(1,trace,dbf_text); 1959 QDIO_DBF_TEXT2(1,trace,dbf_text);
1679 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); 1960 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1680 1961
@@ -1785,21 +2066,22 @@ qdio_timeout_handler(struct ccw_device *cdev)
1785 2066
1786 switch (irq_ptr->state) { 2067 switch (irq_ptr->state) {
1787 case QDIO_IRQ_STATE_INACTIVE: 2068 case QDIO_IRQ_STATE_INACTIVE:
1788 QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n", 2069 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
1789 irq_ptr->irq); 2070 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1790 QDIO_DBF_TEXT2(1,setup,"eq:timeo"); 2071 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
1791 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2072 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1792 break; 2073 break;
1793 case QDIO_IRQ_STATE_CLEANUP: 2074 case QDIO_IRQ_STATE_CLEANUP:
1794 QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n", 2075 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
1795 irq_ptr->irq); 2076 "irq=0.%x.%x.\n",
2077 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1796 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2078 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1797 break; 2079 break;
1798 case QDIO_IRQ_STATE_ESTABLISHED: 2080 case QDIO_IRQ_STATE_ESTABLISHED:
1799 case QDIO_IRQ_STATE_ACTIVE: 2081 case QDIO_IRQ_STATE_ACTIVE:
1800 /* I/O has been terminated by common I/O layer. */ 2082 /* I/O has been terminated by common I/O layer. */
1801 QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n", 2083 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
1802 irq_ptr->irq); 2084 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1803 QDIO_DBF_TEXT2(1, trace, "cio:term"); 2085 QDIO_DBF_TEXT2(1, trace, "cio:term");
1804 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 2086 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1805 if (get_device(&cdev->dev)) { 2087 if (get_device(&cdev->dev)) {
@@ -1862,7 +2144,7 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1862 } 2144 }
1863 } 2145 }
1864 2146
1865 qdio_irq_check_sense(irq_ptr->irq, irb); 2147 qdio_irq_check_sense(irq_ptr->schid, irb);
1866 2148
1867#ifdef CONFIG_QDIO_DEBUG 2149#ifdef CONFIG_QDIO_DEBUG
1868 sprintf(dbf_text, "state:%d", irq_ptr->state); 2150 sprintf(dbf_text, "state:%d", irq_ptr->state);
@@ -1905,7 +2187,7 @@ int
1905qdio_synchronize(struct ccw_device *cdev, unsigned int flags, 2187qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1906 unsigned int queue_number) 2188 unsigned int queue_number)
1907{ 2189{
1908 int cc; 2190 int cc = 0;
1909 struct qdio_q *q; 2191 struct qdio_q *q;
1910 struct qdio_irq *irq_ptr; 2192 struct qdio_irq *irq_ptr;
1911 void *ptr; 2193 void *ptr;
@@ -1918,7 +2200,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1918 return -ENODEV; 2200 return -ENODEV;
1919 2201
1920#ifdef CONFIG_QDIO_DEBUG 2202#ifdef CONFIG_QDIO_DEBUG
1921 *((int*)(&dbf_text[4])) = irq_ptr->irq; 2203 *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
1922 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN); 2204 QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
1923 *((int*)(&dbf_text[0]))=flags; 2205 *((int*)(&dbf_text[0]))=flags;
1924 *((int*)(&dbf_text[4]))=queue_number; 2206 *((int*)(&dbf_text[4]))=queue_number;
@@ -1929,12 +2211,14 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1929 q=irq_ptr->input_qs[queue_number]; 2211 q=irq_ptr->input_qs[queue_number];
1930 if (!q) 2212 if (!q)
1931 return -EINVAL; 2213 return -EINVAL;
1932 cc = do_siga_sync(q->irq, 0, q->mask); 2214 if (!(irq_ptr->is_qebsm))
2215 cc = do_siga_sync(q->schid, 0, q->mask);
1933 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) { 2216 } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
1934 q=irq_ptr->output_qs[queue_number]; 2217 q=irq_ptr->output_qs[queue_number];
1935 if (!q) 2218 if (!q)
1936 return -EINVAL; 2219 return -EINVAL;
1937 cc = do_siga_sync(q->irq, q->mask, 0); 2220 if (!(irq_ptr->is_qebsm))
2221 cc = do_siga_sync(q->schid, q->mask, 0);
1938 } else 2222 } else
1939 return -EINVAL; 2223 return -EINVAL;
1940 2224
@@ -1945,15 +2229,54 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
1945 return cc; 2229 return cc;
1946} 2230}
1947 2231
1948static unsigned char 2232static inline void
1949qdio_check_siga_needs(int sch) 2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token)
2235{
2236 struct qdio_q *q;
2237 int i;
2238 unsigned int count, start_buf;
2239 char dbf_text[15];
2240
2241 /*check if QEBSM is disabled */
2242 if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
2243 irq_ptr->is_qebsm = 0;
2244 irq_ptr->sch_token = 0;
2245 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2246 QDIO_DBF_TEXT0(0,setup,"noV=V");
2247 return;
2248 }
2249 irq_ptr->sch_token = token;
2250 /*input queue*/
2251 for (i = 0; i < irq_ptr->no_input_qs;i++) {
2252 q = irq_ptr->input_qs[i];
2253 count = QDIO_MAX_BUFFERS_PER_Q;
2254 start_buf = 0;
2255 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2256 }
2257 sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2258 QDIO_DBF_TEXT0(0,setup,dbf_text);
2259 sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2260 QDIO_DBF_TEXT0(0,setup,dbf_text);
2261 /*output queue*/
2262 for (i = 0; i < irq_ptr->no_output_qs; i++) {
2263 q = irq_ptr->output_qs[i];
2264 count = QDIO_MAX_BUFFERS_PER_Q;
2265 start_buf = 0;
2266 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2267 }
2268}
2269
2270static void
2271qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
1950{ 2272{
1951 int result; 2273 int result;
1952 unsigned char qdioac; 2274 unsigned char qdioac;
1953
1954 struct { 2275 struct {
1955 struct chsc_header request; 2276 struct chsc_header request;
1956 u16 reserved1; 2277 u16 reserved1:10;
2278 u16 ssid:2;
2279 u16 fmt:4;
1957 u16 first_sch; 2280 u16 first_sch;
1958 u16 reserved2; 2281 u16 reserved2;
1959 u16 last_sch; 2282 u16 last_sch;
@@ -1964,67 +2287,83 @@ qdio_check_siga_needs(int sch)
1964 u8 reserved5; 2287 u8 reserved5;
1965 u16 sch; 2288 u16 sch;
1966 u8 qfmt; 2289 u8 qfmt;
1967 u8 reserved6; 2290 u8 parm;
1968 u8 qdioac; 2291 u8 qdioac1;
1969 u8 sch_class; 2292 u8 sch_class;
1970 u8 reserved7; 2293 u8 reserved7;
1971 u8 icnt; 2294 u8 icnt;
1972 u8 reserved8; 2295 u8 reserved8;
1973 u8 ocnt; 2296 u8 ocnt;
2297 u8 reserved9;
2298 u8 mbccnt;
2299 u16 qdioac2;
2300 u64 sch_token;
1974 } *ssqd_area; 2301 } *ssqd_area;
1975 2302
2303 QDIO_DBF_TEXT0(0,setup,"getssqd");
2304 qdioac = 0;
1976 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2305 ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1977 if (!ssqd_area) { 2306 if (!ssqd_area) {
1978 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ 2307 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
1979 "SIGAs for sch x%x.\n", sch); 2308 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
1980 return CHSC_FLAG_SIGA_INPUT_NECESSARY || 2309 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
1981 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2310 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
1982 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2311 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2312 irq_ptr->is_qebsm = 0;
2313 irq_ptr->sch_token = 0;
2314 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2315 return;
1983 } 2316 }
2317
1984 ssqd_area->request = (struct chsc_header) { 2318 ssqd_area->request = (struct chsc_header) {
1985 .length = 0x0010, 2319 .length = 0x0010,
1986 .code = 0x0024, 2320 .code = 0x0024,
1987 }; 2321 };
1988 2322 ssqd_area->first_sch = irq_ptr->schid.sch_no;
1989 ssqd_area->first_sch = sch; 2323 ssqd_area->last_sch = irq_ptr->schid.sch_no;
1990 ssqd_area->last_sch = sch; 2324 ssqd_area->ssid = irq_ptr->schid.ssid;
1991 2325 result = chsc(ssqd_area);
1992 result=chsc(ssqd_area);
1993 2326
1994 if (result) { 2327 if (result) {
1995 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ 2328 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
1996 "SIGAs for sch x%x.\n", 2329 "SIGAs for sch 0.%x.%x.\n", result,
1997 result,sch); 2330 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
1998 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || 2331 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
1999 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2332 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2000 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2333 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2334 irq_ptr->is_qebsm = 0;
2001 goto out; 2335 goto out;
2002 } 2336 }
2003 2337
2004 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { 2338 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2005 QDIO_PRINT_WARN("response upon checking SIGA needs " \ 2339 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2006 "is 0x%x. Using all SIGAs for sch x%x.\n", 2340 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2007 ssqd_area->response.code, sch); 2341 ssqd_area->response.code,
2342 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2008 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || 2343 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
2009 CHSC_FLAG_SIGA_OUTPUT_NECESSARY || 2344 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2010 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ 2345 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2346 irq_ptr->is_qebsm = 0;
2011 goto out; 2347 goto out;
2012 } 2348 }
2013 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || 2349 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2014 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || 2350 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2015 (ssqd_area->sch != sch)) { 2351 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2016 QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ 2352 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2017 "using all SIGAs.\n",sch); 2353 "using all SIGAs.\n",
2354 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2018 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | 2355 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2019 CHSC_FLAG_SIGA_OUTPUT_NECESSARY | 2356 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2020 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ 2357 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2358 irq_ptr->is_qebsm = 0;
2021 goto out; 2359 goto out;
2022 } 2360 }
2023 2361 qdioac = ssqd_area->qdioac1;
2024 qdioac = ssqd_area->qdioac;
2025out: 2362out:
2363 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2364 ssqd_area->sch_token);
2026 free_page ((unsigned long) ssqd_area); 2365 free_page ((unsigned long) ssqd_area);
2027 return qdioac; 2366 irq_ptr->qdioac = qdioac;
2028} 2367}
2029 2368
2030static unsigned int 2369static unsigned int
@@ -2055,6 +2394,13 @@ tiqdio_check_chsc_availability(void)
2055 sprintf(dbf_text,"hydrati%1x", hydra_thinints); 2394 sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2056 QDIO_DBF_TEXT0(0,setup,dbf_text); 2395 QDIO_DBF_TEXT0(0,setup,dbf_text);
2057 2396
2397#ifdef CONFIG_64BIT
2398 /* Check for QEBSM support in general (bit 58). */
2399 is_passthrough = css_general_characteristics.qebsm;
2400#endif
2401 sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2402 QDIO_DBF_TEXT0(0,setup,dbf_text);
2403
2058 /* Check for aif time delay disablement fac (bit 56). If installed, 2404 /* Check for aif time delay disablement fac (bit 56). If installed,
2059 * omit svs even under lpar (good point by rick again) */ 2405 * omit svs even under lpar (good point by rick again) */
2060 omit_svs = css_general_characteristics.aif_tdd; 2406 omit_svs = css_general_characteristics.aif_tdd;
@@ -2091,7 +2437,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2091 /* set to 0x10000000 to enable 2437 /* set to 0x10000000 to enable
2092 * time delay disablement facility */ 2438 * time delay disablement facility */
2093 u32 reserved5; 2439 u32 reserved5;
2094 u32 subsystem_id; 2440 struct subchannel_id schid;
2095 u32 reserved6[1004]; 2441 u32 reserved6[1004];
2096 struct chsc_header response; 2442 struct chsc_header response;
2097 u32 reserved7; 2443 u32 reserved7;
@@ -2113,7 +2459,8 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2113 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2459 scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2114 if (!scssc_area) { 2460 if (!scssc_area) {
2115 QDIO_PRINT_WARN("No memory for setting indicators on " \ 2461 QDIO_PRINT_WARN("No memory for setting indicators on " \
2116 "subchannel x%x.\n", irq_ptr->irq); 2462 "subchannel 0.%x.%x.\n",
2463 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2117 return -ENOMEM; 2464 return -ENOMEM;
2118 } 2465 }
2119 scssc_area->request = (struct chsc_header) { 2466 scssc_area->request = (struct chsc_header) {
@@ -2127,7 +2474,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2127 scssc_area->ks = QDIO_STORAGE_KEY; 2474 scssc_area->ks = QDIO_STORAGE_KEY;
2128 scssc_area->kc = QDIO_STORAGE_KEY; 2475 scssc_area->kc = QDIO_STORAGE_KEY;
2129 scssc_area->isc = TIQDIO_THININT_ISC; 2476 scssc_area->isc = TIQDIO_THININT_ISC;
2130 scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; 2477 scssc_area->schid = irq_ptr->schid;
2131 /* enables the time delay disablement facility. Don't care 2478 /* enables the time delay disablement facility. Don't care
2132 * whether it is really there (i.e. we haven't checked for 2479 * whether it is really there (i.e. we haven't checked for
2133 * it) */ 2480 * it) */
@@ -2137,12 +2484,11 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2137 QDIO_PRINT_WARN("Time delay disablement facility " \ 2484 QDIO_PRINT_WARN("Time delay disablement facility " \
2138 "not available\n"); 2485 "not available\n");
2139 2486
2140
2141
2142 result = chsc(scssc_area); 2487 result = chsc(scssc_area);
2143 if (result) { 2488 if (result) {
2144 QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ 2489 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2145 "cc=%i.\n",irq_ptr->irq,result); 2490 "cc=%i.\n",
2491 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2146 result = -EIO; 2492 result = -EIO;
2147 goto out; 2493 goto out;
2148 } 2494 }
@@ -2198,7 +2544,8 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2198 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 2544 scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2199 if (!scsscf_area) { 2545 if (!scsscf_area) {
2200 QDIO_PRINT_WARN("No memory for setting delay target on " \ 2546 QDIO_PRINT_WARN("No memory for setting delay target on " \
2201 "subchannel x%x.\n", irq_ptr->irq); 2547 "subchannel 0.%x.%x.\n",
2548 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2202 return -ENOMEM; 2549 return -ENOMEM;
2203 } 2550 }
2204 scsscf_area->request = (struct chsc_header) { 2551 scsscf_area->request = (struct chsc_header) {
@@ -2210,8 +2557,10 @@ tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2210 2557
2211 result=chsc(scsscf_area); 2558 result=chsc(scsscf_area);
2212 if (result) { 2559 if (result) {
2213 QDIO_PRINT_WARN("could not set delay target on irq x%x, " \ 2560 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2214 "cc=%i. Continuing.\n",irq_ptr->irq,result); 2561 "cc=%i. Continuing.\n",
2562 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2563 result);
2215 result = -EIO; 2564 result = -EIO;
2216 goto out; 2565 goto out;
2217 } 2566 }
@@ -2245,7 +2594,7 @@ qdio_cleanup(struct ccw_device *cdev, int how)
2245 if (!irq_ptr) 2594 if (!irq_ptr)
2246 return -ENODEV; 2595 return -ENODEV;
2247 2596
2248 sprintf(dbf_text,"qcln%4x",irq_ptr->irq); 2597 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2249 QDIO_DBF_TEXT1(0,trace,dbf_text); 2598 QDIO_DBF_TEXT1(0,trace,dbf_text);
2250 QDIO_DBF_TEXT0(0,setup,dbf_text); 2599 QDIO_DBF_TEXT0(0,setup,dbf_text);
2251 2600
@@ -2272,7 +2621,7 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2272 2621
2273 down(&irq_ptr->setting_up_sema); 2622 down(&irq_ptr->setting_up_sema);
2274 2623
2275 sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); 2624 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2276 QDIO_DBF_TEXT1(0,trace,dbf_text); 2625 QDIO_DBF_TEXT1(0,trace,dbf_text);
2277 QDIO_DBF_TEXT0(0,setup,dbf_text); 2626 QDIO_DBF_TEXT0(0,setup,dbf_text);
2278 2627
@@ -2378,7 +2727,7 @@ qdio_free(struct ccw_device *cdev)
2378 2727
2379 down(&irq_ptr->setting_up_sema); 2728 down(&irq_ptr->setting_up_sema);
2380 2729
2381 sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); 2730 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2382 QDIO_DBF_TEXT1(0,trace,dbf_text); 2731 QDIO_DBF_TEXT1(0,trace,dbf_text);
2383 QDIO_DBF_TEXT0(0,setup,dbf_text); 2732 QDIO_DBF_TEXT0(0,setup,dbf_text);
2384 2733
@@ -2526,13 +2875,14 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2526 irq_ptr = cdev->private->qdio_data; 2875 irq_ptr = cdev->private->qdio_data;
2527 2876
2528 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) { 2877 if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2529 sprintf(dbf_text,"ick1%4x",irq_ptr->irq); 2878 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2530 QDIO_DBF_TEXT2(1,trace,dbf_text); 2879 QDIO_DBF_TEXT2(1,trace,dbf_text);
2531 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int)); 2880 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2532 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int)); 2881 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2533 QDIO_PRINT_ERR("received check condition on establish " \ 2882 QDIO_PRINT_ERR("received check condition on establish " \
2534 "queues on irq 0x%x (cs=x%x, ds=x%x).\n", 2883 "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2535 irq_ptr->irq,cstat,dstat); 2884 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2885 cstat,dstat);
2536 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR); 2886 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2537 } 2887 }
2538 2888
@@ -2540,9 +2890,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2540 QDIO_DBF_TEXT2(1,setup,"eq:no de"); 2890 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2541 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2891 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2542 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2892 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2543 QDIO_PRINT_ERR("establish queues on irq %04x: didn't get " 2893 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2544 "device end: dstat=%02x, cstat=%02x\n", 2894 "device end: dstat=%02x, cstat=%02x\n",
2545 irq_ptr->irq, dstat, cstat); 2895 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2896 dstat, cstat);
2546 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2897 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2547 return 1; 2898 return 1;
2548 } 2899 }
@@ -2551,10 +2902,10 @@ qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2551 QDIO_DBF_TEXT2(1,setup,"eq:badio"); 2902 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2552 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat)); 2903 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2553 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat)); 2904 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2554 QDIO_PRINT_ERR("establish queues on irq %04x: got " 2905 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2555 "the following devstat: dstat=%02x, " 2906 "the following devstat: dstat=%02x, "
2556 "cstat=%02x\n", 2907 "cstat=%02x\n", irq_ptr->schid.ssid,
2557 irq_ptr->irq, dstat, cstat); 2908 irq_ptr->schid.sch_no, dstat, cstat);
2558 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 2909 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2559 return 1; 2910 return 1;
2560 } 2911 }
@@ -2569,7 +2920,7 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2569 2920
2570 irq_ptr = cdev->private->qdio_data; 2921 irq_ptr = cdev->private->qdio_data;
2571 2922
2572 sprintf(dbf_text,"qehi%4x",cdev->private->irq); 2923 sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
2573 QDIO_DBF_TEXT0(0,setup,dbf_text); 2924 QDIO_DBF_TEXT0(0,setup,dbf_text);
2574 QDIO_DBF_TEXT0(0,trace,dbf_text); 2925 QDIO_DBF_TEXT0(0,trace,dbf_text);
2575 2926
@@ -2588,7 +2939,7 @@ qdio_initialize(struct qdio_initialize *init_data)
2588 int rc; 2939 int rc;
2589 char dbf_text[15]; 2940 char dbf_text[15];
2590 2941
2591 sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); 2942 sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
2592 QDIO_DBF_TEXT0(0,setup,dbf_text); 2943 QDIO_DBF_TEXT0(0,setup,dbf_text);
2593 QDIO_DBF_TEXT0(0,trace,dbf_text); 2944 QDIO_DBF_TEXT0(0,trace,dbf_text);
2594 2945
@@ -2609,7 +2960,7 @@ qdio_allocate(struct qdio_initialize *init_data)
2609 struct qdio_irq *irq_ptr; 2960 struct qdio_irq *irq_ptr;
2610 char dbf_text[15]; 2961 char dbf_text[15];
2611 2962
2612 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); 2963 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
2613 QDIO_DBF_TEXT0(0,setup,dbf_text); 2964 QDIO_DBF_TEXT0(0,setup,dbf_text);
2614 QDIO_DBF_TEXT0(0,trace,dbf_text); 2965 QDIO_DBF_TEXT0(0,trace,dbf_text);
2615 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || 2966 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
@@ -2682,7 +3033,7 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2682 3033
2683 irq_ptr->int_parm=init_data->int_parm; 3034 irq_ptr->int_parm=init_data->int_parm;
2684 3035
2685 irq_ptr->irq = init_data->cdev->private->irq; 3036 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
2686 irq_ptr->no_input_qs=init_data->no_input_qs; 3037 irq_ptr->no_input_qs=init_data->no_input_qs;
2687 irq_ptr->no_output_qs=init_data->no_output_qs; 3038 irq_ptr->no_output_qs=init_data->no_output_qs;
2688 3039
@@ -2698,11 +3049,12 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2698 QDIO_DBF_TEXT2(0,setup,dbf_text); 3049 QDIO_DBF_TEXT2(0,setup,dbf_text);
2699 3050
2700 if (irq_ptr->is_thinint_irq) { 3051 if (irq_ptr->is_thinint_irq) {
2701 irq_ptr->dev_st_chg_ind=qdio_get_indicator(); 3052 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
2702 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*)); 3053 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
2703 if (!irq_ptr->dev_st_chg_ind) { 3054 if (!irq_ptr->dev_st_chg_ind) {
2704 QDIO_PRINT_WARN("no indicator location available " \ 3055 QDIO_PRINT_WARN("no indicator location available " \
2705 "for irq 0x%x\n",irq_ptr->irq); 3056 "for irq 0.%x.%x\n",
3057 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2706 qdio_release_irq_memory(irq_ptr); 3058 qdio_release_irq_memory(irq_ptr);
2707 return -ENOBUFS; 3059 return -ENOBUFS;
2708 } 3060 }
@@ -2747,6 +3099,10 @@ int qdio_fill_irq(struct qdio_initialize *init_data)
2747 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY; 3099 irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
2748 3100
2749 /* fill in qib */ 3101 /* fill in qib */
3102 irq_ptr->is_qebsm = is_passthrough;
3103 if (irq_ptr->is_qebsm)
3104 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3105
2750 irq_ptr->qib.qfmt=init_data->q_format; 3106 irq_ptr->qib.qfmt=init_data->q_format;
2751 if (init_data->no_input_qs) 3107 if (init_data->no_input_qs)
2752 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib); 3108 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
@@ -2829,7 +3185,7 @@ qdio_establish(struct qdio_initialize *init_data)
2829 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); 3185 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
2830 } 3186 }
2831 3187
2832 sprintf(dbf_text,"qest%4x",cdev->private->irq); 3188 sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
2833 QDIO_DBF_TEXT0(0,setup,dbf_text); 3189 QDIO_DBF_TEXT0(0,setup,dbf_text);
2834 QDIO_DBF_TEXT0(0,trace,dbf_text); 3190 QDIO_DBF_TEXT0(0,trace,dbf_text);
2835 3191
@@ -2855,9 +3211,10 @@ qdio_establish(struct qdio_initialize *init_data)
2855 sprintf(dbf_text,"eq:io%4x",result); 3211 sprintf(dbf_text,"eq:io%4x",result);
2856 QDIO_DBF_TEXT2(1,setup,dbf_text); 3212 QDIO_DBF_TEXT2(1,setup,dbf_text);
2857 } 3213 }
2858 QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ 3214 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
2859 "returned %i, next try returned %i\n", 3215 "returned %i, next try returned %i\n",
2860 irq_ptr->irq,result,result2); 3216 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3217 result, result2);
2861 result=result2; 3218 result=result2;
2862 if (result) 3219 if (result)
2863 ccw_device_set_timeout(cdev, 0); 3220 ccw_device_set_timeout(cdev, 0);
@@ -2884,7 +3241,7 @@ qdio_establish(struct qdio_initialize *init_data)
2884 return -EIO; 3241 return -EIO;
2885 } 3242 }
2886 3243
2887 irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); 3244 qdio_get_ssqd_information(irq_ptr);
2888 /* if this gets set once, we're running under VM and can omit SVSes */ 3245 /* if this gets set once, we're running under VM and can omit SVSes */
2889 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) 3246 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
2890 omit_svs=1; 3247 omit_svs=1;
@@ -2930,7 +3287,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
2930 goto out; 3287 goto out;
2931 } 3288 }
2932 3289
2933 sprintf(dbf_text,"qact%4x", irq_ptr->irq); 3290 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
2934 QDIO_DBF_TEXT2(0,setup,dbf_text); 3291 QDIO_DBF_TEXT2(0,setup,dbf_text);
2935 QDIO_DBF_TEXT2(0,trace,dbf_text); 3292 QDIO_DBF_TEXT2(0,trace,dbf_text);
2936 3293
@@ -2955,9 +3312,10 @@ qdio_activate(struct ccw_device *cdev, int flags)
2955 sprintf(dbf_text,"aq:io%4x",result); 3312 sprintf(dbf_text,"aq:io%4x",result);
2956 QDIO_DBF_TEXT2(1,setup,dbf_text); 3313 QDIO_DBF_TEXT2(1,setup,dbf_text);
2957 } 3314 }
2958 QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ 3315 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
2959 "returned %i, next try returned %i\n", 3316 "returned %i, next try returned %i\n",
2960 irq_ptr->irq,result,result2); 3317 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3318 result, result2);
2961 result=result2; 3319 result=result2;
2962 } 3320 }
2963 3321
@@ -3015,30 +3373,40 @@ static inline void
3015qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3373qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3016 unsigned int count, struct qdio_buffer *buffers) 3374 unsigned int count, struct qdio_buffer *buffers)
3017{ 3375{
3376 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3377 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3378 if (irq->is_qebsm) {
3379 while (count)
3380 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3381 return;
3382 }
3018 for (;;) { 3383 for (;;) {
3019 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); 3384 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3020 count--; 3385 count--;
3021 if (!count) break; 3386 if (!count) break;
3022 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3387 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3023 } 3388 }
3024
3025 /* not necessary, as the queues are synced during the SIGA read */
3026 /*SYNC_MEMORY;*/
3027} 3389}
3028 3390
3029static inline void 3391static inline void
3030qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3392qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3031 unsigned int count, struct qdio_buffer *buffers) 3393 unsigned int count, struct qdio_buffer *buffers)
3032{ 3394{
3395 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3396
3397 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3398 if (irq->is_qebsm) {
3399 while (count)
3400 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3401 return;
3402 }
3403
3033 for (;;) { 3404 for (;;) {
3034 set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); 3405 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3035 count--; 3406 count--;
3036 if (!count) break; 3407 if (!count) break;
3037 qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); 3408 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3038 } 3409 }
3039
3040 /* SIGA write will sync the queues */
3041 /*SYNC_MEMORY;*/
3042} 3410}
3043 3411
3044static inline void 3412static inline void
@@ -3083,6 +3451,9 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3083 struct qdio_buffer *buffers) 3451 struct qdio_buffer *buffers)
3084{ 3452{
3085 int used_elements; 3453 int used_elements;
3454 unsigned int cnt, start_buf;
3455 unsigned char state = 0;
3456 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3086 3457
3087 /* This is the outbound handling of queues */ 3458 /* This is the outbound handling of queues */
3088#ifdef QDIO_PERFORMANCE_STATS 3459#ifdef QDIO_PERFORMANCE_STATS
@@ -3115,9 +3486,15 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3115 * SYNC_MEMORY :-/ ), we try to 3486 * SYNC_MEMORY :-/ ), we try to
3116 * fast-requeue buffers 3487 * fast-requeue buffers
3117 */ 3488 */
3118 if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1) 3489 if (irq->is_qebsm) {
3119 &(QDIO_MAX_BUFFERS_PER_Q-1)]!= 3490 cnt = 1;
3120 SLSB_CU_OUTPUT_PRIMED) { 3491 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3492 (QDIO_MAX_BUFFERS_PER_Q-1));
3493 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3494 } else
3495 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3496 &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3497 if (state != SLSB_CU_OUTPUT_PRIMED) {
3121 qdio_kick_outbound_q(q); 3498 qdio_kick_outbound_q(q);
3122 } else { 3499 } else {
3123 QDIO_DBF_TEXT3(0,trace, "fast-req"); 3500 QDIO_DBF_TEXT3(0,trace, "fast-req");
@@ -3150,7 +3527,7 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3150#ifdef CONFIG_QDIO_DEBUG 3527#ifdef CONFIG_QDIO_DEBUG
3151 char dbf_text[20]; 3528 char dbf_text[20];
3152 3529
3153 sprintf(dbf_text,"doQD%04x",cdev->private->irq); 3530 sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
3154 QDIO_DBF_TEXT3(0,trace,dbf_text); 3531 QDIO_DBF_TEXT3(0,trace,dbf_text);
3155#endif /* CONFIG_QDIO_DEBUG */ 3532#endif /* CONFIG_QDIO_DEBUG */
3156 3533
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 328e31cc6854..fa385e761fe1 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -3,14 +3,15 @@
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5
6#define VERSION_CIO_QDIO_H "$Revision: 1.33 $" 6#include "schid.h"
7
8#define VERSION_CIO_QDIO_H "$Revision: 1.40 $"
7 9
8#ifdef CONFIG_QDIO_DEBUG 10#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 11#define QDIO_VERBOSE_LEVEL 9
10#else /* CONFIG_QDIO_DEBUG */ 12#else /* CONFIG_QDIO_DEBUG */
11#define QDIO_VERBOSE_LEVEL 5 13#define QDIO_VERBOSE_LEVEL 5
12#endif /* CONFIG_QDIO_DEBUG */ 14#endif /* CONFIG_QDIO_DEBUG */
13
14#define QDIO_USE_PROCESSING_STATE 15#define QDIO_USE_PROCESSING_STATE
15 16
16#ifdef CONFIG_QDIO_PERF_STATS 17#ifdef CONFIG_QDIO_PERF_STATS
@@ -265,12 +266,64 @@ QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
265/* 266/*
266 * Some instructions as assembly 267 * Some instructions as assembly
267 */ 268 */
269
270static inline int
271do_sqbs(unsigned long sch, unsigned char state, int queue,
272 unsigned int *start, unsigned int *count)
273{
274#ifdef CONFIG_64BIT
275 register unsigned long _ccq asm ("0") = *count;
276 register unsigned long _sch asm ("1") = sch;
277 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
278
279 asm volatile (
280 " .insn rsy,0xeb000000008A,%1,0,0(%2)\n\t"
281 : "+d" (_ccq), "+d" (_queuestart)
282 : "d" ((unsigned long)state), "d" (_sch)
283 : "memory", "cc"
284 );
285 *count = _ccq & 0xff;
286 *start = _queuestart & 0xff;
287
288 return (_ccq >> 32) & 0xff;
289#else
290 return 0;
291#endif
292}
293
294static inline int
295do_eqbs(unsigned long sch, unsigned char *state, int queue,
296 unsigned int *start, unsigned int *count)
297{
298#ifdef CONFIG_64BIT
299 register unsigned long _ccq asm ("0") = *count;
300 register unsigned long _sch asm ("1") = sch;
301 unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
302 unsigned long _state = 0;
303
304 asm volatile (
305 " .insn rrf,0xB99c0000,%1,%2,0,0 \n\t"
306 : "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
307 : "d" (_sch)
308 : "memory", "cc"
309 );
310 *count = _ccq & 0xff;
311 *start = _queuestart & 0xff;
312 *state = _state & 0xff;
313
314 return (_ccq >> 32) & 0xff;
315#else
316 return 0;
317#endif
318}
319
320
268static inline int 321static inline int
269do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2) 322do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
270{ 323{
271 int cc; 324 int cc;
272 325
273#ifndef CONFIG_ARCH_S390X 326#ifndef CONFIG_64BIT
274 asm volatile ( 327 asm volatile (
275 "lhi 0,2 \n\t" 328 "lhi 0,2 \n\t"
276 "lr 1,%1 \n\t" 329 "lr 1,%1 \n\t"
@@ -280,10 +333,10 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
280 "ipm %0 \n\t" 333 "ipm %0 \n\t"
281 "srl %0,28 \n\t" 334 "srl %0,28 \n\t"
282 : "=d" (cc) 335 : "=d" (cc)
283 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 336 : "d" (schid), "d" (mask1), "d" (mask2)
284 : "cc", "0", "1", "2", "3" 337 : "cc", "0", "1", "2", "3"
285 ); 338 );
286#else /* CONFIG_ARCH_S390X */ 339#else /* CONFIG_64BIT */
287 asm volatile ( 340 asm volatile (
288 "lghi 0,2 \n\t" 341 "lghi 0,2 \n\t"
289 "llgfr 1,%1 \n\t" 342 "llgfr 1,%1 \n\t"
@@ -293,19 +346,19 @@ do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
293 "ipm %0 \n\t" 346 "ipm %0 \n\t"
294 "srl %0,28 \n\t" 347 "srl %0,28 \n\t"
295 : "=d" (cc) 348 : "=d" (cc)
296 : "d" (0x10000|irq), "d" (mask1), "d" (mask2) 349 : "d" (schid), "d" (mask1), "d" (mask2)
297 : "cc", "0", "1", "2", "3" 350 : "cc", "0", "1", "2", "3"
298 ); 351 );
299#endif /* CONFIG_ARCH_S390X */ 352#endif /* CONFIG_64BIT */
300 return cc; 353 return cc;
301} 354}
302 355
303static inline int 356static inline int
304do_siga_input(unsigned int irq, unsigned int mask) 357do_siga_input(struct subchannel_id schid, unsigned int mask)
305{ 358{
306 int cc; 359 int cc;
307 360
308#ifndef CONFIG_ARCH_S390X 361#ifndef CONFIG_64BIT
309 asm volatile ( 362 asm volatile (
310 "lhi 0,1 \n\t" 363 "lhi 0,1 \n\t"
311 "lr 1,%1 \n\t" 364 "lr 1,%1 \n\t"
@@ -314,10 +367,10 @@ do_siga_input(unsigned int irq, unsigned int mask)
314 "ipm %0 \n\t" 367 "ipm %0 \n\t"
315 "srl %0,28 \n\t" 368 "srl %0,28 \n\t"
316 : "=d" (cc) 369 : "=d" (cc)
317 : "d" (0x10000|irq), "d" (mask) 370 : "d" (schid), "d" (mask)
318 : "cc", "0", "1", "2", "memory" 371 : "cc", "0", "1", "2", "memory"
319 ); 372 );
320#else /* CONFIG_ARCH_S390X */ 373#else /* CONFIG_64BIT */
321 asm volatile ( 374 asm volatile (
322 "lghi 0,1 \n\t" 375 "lghi 0,1 \n\t"
323 "llgfr 1,%1 \n\t" 376 "llgfr 1,%1 \n\t"
@@ -326,21 +379,22 @@ do_siga_input(unsigned int irq, unsigned int mask)
326 "ipm %0 \n\t" 379 "ipm %0 \n\t"
327 "srl %0,28 \n\t" 380 "srl %0,28 \n\t"
328 : "=d" (cc) 381 : "=d" (cc)
329 : "d" (0x10000|irq), "d" (mask) 382 : "d" (schid), "d" (mask)
330 : "cc", "0", "1", "2", "memory" 383 : "cc", "0", "1", "2", "memory"
331 ); 384 );
332#endif /* CONFIG_ARCH_S390X */ 385#endif /* CONFIG_64BIT */
333 386
334 return cc; 387 return cc;
335} 388}
336 389
337static inline int 390static inline int
338do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb) 391do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
392 unsigned int fc)
339{ 393{
340 int cc; 394 int cc;
341 __u32 busy_bit; 395 __u32 busy_bit;
342 396
343#ifndef CONFIG_ARCH_S390X 397#ifndef CONFIG_64BIT
344 asm volatile ( 398 asm volatile (
345 "lhi 0,0 \n\t" 399 "lhi 0,0 \n\t"
346 "lr 1,%2 \n\t" 400 "lr 1,%2 \n\t"
@@ -366,14 +420,14 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
366 ".long 0b,2b \n\t" 420 ".long 0b,2b \n\t"
367 ".previous \n\t" 421 ".previous \n\t"
368 : "=d" (cc), "=d" (busy_bit) 422 : "=d" (cc), "=d" (busy_bit)
369 : "d" (0x10000|irq), "d" (mask), 423 : "d" (schid), "d" (mask),
370 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 424 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
371 : "cc", "0", "1", "2", "memory" 425 : "cc", "0", "1", "2", "memory"
372 ); 426 );
373#else /* CONFIG_ARCH_S390X */ 427#else /* CONFIG_64BIT */
374 asm volatile ( 428 asm volatile (
375 "lghi 0,0 \n\t" 429 "llgfr 0,%5 \n\t"
376 "llgfr 1,%2 \n\t" 430 "lgr 1,%2 \n\t"
377 "llgfr 2,%3 \n\t" 431 "llgfr 2,%3 \n\t"
378 "siga 0 \n\t" 432 "siga 0 \n\t"
379 "0:" 433 "0:"
@@ -391,11 +445,11 @@ do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
391 ".quad 0b,1b \n\t" 445 ".quad 0b,1b \n\t"
392 ".previous \n\t" 446 ".previous \n\t"
393 : "=d" (cc), "=d" (busy_bit) 447 : "=d" (cc), "=d" (busy_bit)
394 : "d" (0x10000|irq), "d" (mask), 448 : "d" (schid), "d" (mask),
395 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION) 449 "i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION), "d" (fc)
396 : "cc", "0", "1", "2", "memory" 450 : "cc", "0", "1", "2", "memory"
397 ); 451 );
398#endif /* CONFIG_ARCH_S390X */ 452#endif /* CONFIG_64BIT */
399 453
400 (*bb) = busy_bit; 454 (*bb) = busy_bit;
401 return cc; 455 return cc;
@@ -407,21 +461,21 @@ do_clear_global_summary(void)
407 461
408 unsigned long time; 462 unsigned long time;
409 463
410#ifndef CONFIG_ARCH_S390X 464#ifndef CONFIG_64BIT
411 asm volatile ( 465 asm volatile (
412 "lhi 1,3 \n\t" 466 "lhi 1,3 \n\t"
413 ".insn rre,0xb2650000,2,0 \n\t" 467 ".insn rre,0xb2650000,2,0 \n\t"
414 "lr %0,3 \n\t" 468 "lr %0,3 \n\t"
415 : "=d" (time) : : "cc", "1", "2", "3" 469 : "=d" (time) : : "cc", "1", "2", "3"
416 ); 470 );
417#else /* CONFIG_ARCH_S390X */ 471#else /* CONFIG_64BIT */
418 asm volatile ( 472 asm volatile (
419 "lghi 1,3 \n\t" 473 "lghi 1,3 \n\t"
420 ".insn rre,0xb2650000,2,0 \n\t" 474 ".insn rre,0xb2650000,2,0 \n\t"
421 "lgr %0,3 \n\t" 475 "lgr %0,3 \n\t"
422 : "=d" (time) : : "cc", "1", "2", "3" 476 : "=d" (time) : : "cc", "1", "2", "3"
423 ); 477 );
424#endif /* CONFIG_ARCH_S390X */ 478#endif /* CONFIG_64BIT */
425 479
426 return time; 480 return time;
427} 481}
@@ -488,42 +542,21 @@ struct qdio_perf_stats {
488 542
489#define MY_MODULE_STRING(x) #x 543#define MY_MODULE_STRING(x) #x
490 544
491#ifdef CONFIG_ARCH_S390X 545#ifdef CONFIG_64BIT
492#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) 546#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
493#else /* CONFIG_ARCH_S390X */ 547#else /* CONFIG_64BIT */
494#define QDIO_GET_ADDR(x) ((__u32)(long)x) 548#define QDIO_GET_ADDR(x) ((__u32)(long)x)
495#endif /* CONFIG_ARCH_S390X */ 549#endif /* CONFIG_64BIT */
496
497#ifdef CONFIG_QDIO_DEBUG
498#define set_slsb(x,y) \
499 if(q->queue_type==QDIO_TRACE_QTYPE) { \
500 if(q->is_input_q) { \
501 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
502 } else { \
503 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
504 } \
505 } \
506 qdio_set_slsb(x,y); \
507 if(q->queue_type==QDIO_TRACE_QTYPE) { \
508 if(q->is_input_q) { \
509 QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
510 } else { \
511 QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
512 } \
513 }
514#else /* CONFIG_QDIO_DEBUG */
515#define set_slsb(x,y) qdio_set_slsb(x,y)
516#endif /* CONFIG_QDIO_DEBUG */
517 550
518struct qdio_q { 551struct qdio_q {
519 volatile struct slsb slsb; 552 volatile struct slsb slsb;
520 553
521 char unused[QDIO_MAX_BUFFERS_PER_Q]; 554 char unused[QDIO_MAX_BUFFERS_PER_Q];
522 555
523 __u32 * volatile dev_st_chg_ind; 556 __u32 * dev_st_chg_ind;
524 557
525 int is_input_q; 558 int is_input_q;
526 int irq; 559 struct subchannel_id schid;
527 struct ccw_device *cdev; 560 struct ccw_device *cdev;
528 561
529 unsigned int is_iqdio_q; 562 unsigned int is_iqdio_q;
@@ -568,6 +601,7 @@ struct qdio_q {
568 struct tasklet_struct tasklet; 601 struct tasklet_struct tasklet;
569#endif /* QDIO_USE_TIMERS_FOR_POLLING */ 602#endif /* QDIO_USE_TIMERS_FOR_POLLING */
570 603
604
571 enum qdio_irq_states state; 605 enum qdio_irq_states state;
572 606
573 /* used to store the error condition during a data transfer */ 607 /* used to store the error condition during a data transfer */
@@ -617,13 +651,17 @@ struct qdio_irq {
617 __u32 * volatile dev_st_chg_ind; 651 __u32 * volatile dev_st_chg_ind;
618 652
619 unsigned long int_parm; 653 unsigned long int_parm;
620 int irq; 654 struct subchannel_id schid;
621 655
622 unsigned int is_iqdio_irq; 656 unsigned int is_iqdio_irq;
623 unsigned int is_thinint_irq; 657 unsigned int is_thinint_irq;
624 unsigned int hydra_gives_outbound_pcis; 658 unsigned int hydra_gives_outbound_pcis;
625 unsigned int sync_done_on_outb_pcis; 659 unsigned int sync_done_on_outb_pcis;
626 660
661 /* QEBSM facility */
662 unsigned int is_qebsm;
663 unsigned long sch_token;
664
627 enum qdio_irq_states state; 665 enum qdio_irq_states state;
628 666
629 unsigned int no_input_qs; 667 unsigned int no_input_qs;
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
new file mode 100644
index 000000000000..54328fec5ade
--- /dev/null
+++ b/drivers/s390/cio/schid.h
@@ -0,0 +1,26 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h
index e319e78b5ea2..f87c785f2039 100644
--- a/drivers/s390/crypto/z90common.h
+++ b/drivers/s390/crypto/z90common.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90common.h 2 * linux/drivers/s390/crypto/z90common.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -91,12 +91,13 @@ enum hdstat {
91#define TSQ_FATAL_ERROR 34 91#define TSQ_FATAL_ERROR 34
92#define RSQ_FATAL_ERROR 35 92#define RSQ_FATAL_ERROR 35
93 93
94#define Z90CRYPT_NUM_TYPES 5 94#define Z90CRYPT_NUM_TYPES 6
95#define PCICA 0 95#define PCICA 0
96#define PCICC 1 96#define PCICC 1
97#define PCIXCC_MCL2 2 97#define PCIXCC_MCL2 2
98#define PCIXCC_MCL3 3 98#define PCIXCC_MCL3 3
99#define CEX2C 4 99#define CEX2C 4
100#define CEX2A 5
100#define NILDEV -1 101#define NILDEV -1
101#define ANYDEV -1 102#define ANYDEV -1
102#define PCIXCC_UNK -2 103#define PCIXCC_UNK -2
@@ -105,7 +106,7 @@ enum hdevice_type {
105 PCICC_HW = 3, 106 PCICC_HW = 3,
106 PCICA_HW = 4, 107 PCICA_HW = 4,
107 PCIXCC_HW = 5, 108 PCIXCC_HW = 5,
108 OTHER_HW = 6, 109 CEX2A_HW = 6,
109 CEX2C_HW = 7 110 CEX2C_HW = 7
110}; 111};
111 112
diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h
index 0a3bb5a10dd4..3a18443fdfa7 100644
--- a/drivers/s390/crypto/z90crypt.h
+++ b/drivers/s390/crypto/z90crypt.h
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90crypt.h 2 * linux/drivers/s390/crypto/z90crypt.h
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -29,11 +29,11 @@
29 29
30#include <linux/ioctl.h> 30#include <linux/ioctl.h>
31 31
32#define VERSION_Z90CRYPT_H "$Revision: 1.11 $" 32#define VERSION_Z90CRYPT_H "$Revision: 1.2.2.4 $"
33 33
34#define z90crypt_VERSION 1 34#define z90crypt_VERSION 1
35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards 35#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards
36#define z90crypt_VARIANT 2 // 2 = added PCIXCC MCL3 and CEX2C support 36#define z90crypt_VARIANT 3 // 3 = CEX2A support
37 37
38/** 38/**
39 * struct ica_rsa_modexpo 39 * struct ica_rsa_modexpo
@@ -122,6 +122,9 @@ struct ica_rsa_modexpo_crt {
122 * Z90STAT_CEX2CCOUNT 122 * Z90STAT_CEX2CCOUNT
123 * Return an integer count of all CEX2Cs. 123 * Return an integer count of all CEX2Cs.
124 * 124 *
125 * Z90STAT_CEX2ACOUNT
126 * Return an integer count of all CEX2As.
127 *
125 * Z90STAT_REQUESTQ_COUNT 128 * Z90STAT_REQUESTQ_COUNT
126 * Return an integer count of the number of entries waiting to be 129 * Return an integer count of the number of entries waiting to be
127 * sent to a device. 130 * sent to a device.
@@ -144,6 +147,7 @@ struct ica_rsa_modexpo_crt {
144 * 0x03: PCIXCC_MCL2 147 * 0x03: PCIXCC_MCL2
145 * 0x04: PCIXCC_MCL3 148 * 0x04: PCIXCC_MCL3
146 * 0x05: CEX2C 149 * 0x05: CEX2C
150 * 0x06: CEX2A
147 * 0x0d: device is disabled via the proc filesystem 151 * 0x0d: device is disabled via the proc filesystem
148 * 152 *
149 * Z90STAT_QDEPTH_MASK 153 * Z90STAT_QDEPTH_MASK
@@ -199,6 +203,7 @@ struct ica_rsa_modexpo_crt {
199#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) 203#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int)
200#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) 204#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int)
201#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) 205#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int)
206#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int)
202#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) 207#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int)
203#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) 208#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int)
204#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) 209#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int)
diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c
index c215e0889736..d7f7494a0cbe 100644
--- a/drivers/s390/crypto/z90hardware.c
+++ b/drivers/s390/crypto/z90hardware.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * linux/drivers/s390/crypto/z90hardware.c 2 * linux/drivers/s390/crypto/z90hardware.c
3 * 3 *
4 * z90crypt 1.3.2 4 * z90crypt 1.3.3
5 * 5 *
6 * Copyright (C) 2001, 2004 IBM Corporation 6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com) 7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com) 8 * Eric Rossman (edrossma@us.ibm.com)
9 * 9 *
@@ -648,6 +648,87 @@ static struct cca_public_sec static_cca_pub_sec = {
648#define RESPONSE_CPRB_SIZE 0x000006B8 648#define RESPONSE_CPRB_SIZE 0x000006B8
649#define RESPONSE_CPRBX_SIZE 0x00000724 649#define RESPONSE_CPRBX_SIZE 0x00000724
650 650
651struct type50_hdr {
652 u8 reserved1;
653 u8 msg_type_code;
654 u16 msg_len;
655 u8 reserved2;
656 u8 ignored;
657 u16 reserved3;
658};
659
660#define TYPE50_TYPE_CODE 0x50
661
662#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg))
663#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg))
664#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg))
665#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg))
666
667#define TYPE50_MEB1_FMT 0x0001
668#define TYPE50_MEB2_FMT 0x0002
669#define TYPE50_CRB1_FMT 0x0011
670#define TYPE50_CRB2_FMT 0x0012
671
672struct type50_meb1_msg {
673 struct type50_hdr header;
674 u16 keyblock_type;
675 u8 reserved[6];
676 u8 exponent[128];
677 u8 modulus[128];
678 u8 message[128];
679};
680
681struct type50_meb2_msg {
682 struct type50_hdr header;
683 u16 keyblock_type;
684 u8 reserved[6];
685 u8 exponent[256];
686 u8 modulus[256];
687 u8 message[256];
688};
689
690struct type50_crb1_msg {
691 struct type50_hdr header;
692 u16 keyblock_type;
693 u8 reserved[6];
694 u8 p[64];
695 u8 q[64];
696 u8 dp[64];
697 u8 dq[64];
698 u8 u[64];
699 u8 message[128];
700};
701
702struct type50_crb2_msg {
703 struct type50_hdr header;
704 u16 keyblock_type;
705 u8 reserved[6];
706 u8 p[128];
707 u8 q[128];
708 u8 dp[128];
709 u8 dq[128];
710 u8 u[128];
711 u8 message[256];
712};
713
714union type50_msg {
715 struct type50_meb1_msg meb1;
716 struct type50_meb2_msg meb2;
717 struct type50_crb1_msg crb1;
718 struct type50_crb2_msg crb2;
719};
720
721struct type80_hdr {
722 u8 reserved1;
723 u8 type;
724 u16 len;
725 u8 code;
726 u8 reserved2[3];
727 u8 reserved3[8];
728};
729
730#define TYPE80_RSP_CODE 0x80
731
651struct error_hdr { 732struct error_hdr {
652 unsigned char reserved1; 733 unsigned char reserved1;
653 unsigned char type; 734 unsigned char type;
@@ -657,6 +738,7 @@ struct error_hdr {
657}; 738};
658 739
659#define TYPE82_RSP_CODE 0x82 740#define TYPE82_RSP_CODE 0x82
741#define TYPE88_RSP_CODE 0x88
660 742
661#define REP82_ERROR_MACHINE_FAILURE 0x10 743#define REP82_ERROR_MACHINE_FAILURE 0x10
662#define REP82_ERROR_PREEMPT_FAILURE 0x12 744#define REP82_ERROR_PREEMPT_FAILURE 0x12
@@ -679,6 +761,22 @@ struct error_hdr {
679#define REP82_ERROR_PACKET_TRUNCATED 0xA0 761#define REP82_ERROR_PACKET_TRUNCATED 0xA0
680#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 762#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
681 763
764#define REP88_ERROR_MODULE_FAILURE 0x10
765#define REP88_ERROR_MODULE_TIMEOUT 0x11
766#define REP88_ERROR_MODULE_NOTINIT 0x13
767#define REP88_ERROR_MODULE_NOTAVAIL 0x14
768#define REP88_ERROR_MODULE_DISABLED 0x15
769#define REP88_ERROR_MODULE_IN_DIAGN 0x17
770#define REP88_ERROR_FASTPATH_DISABLD 0x19
771#define REP88_ERROR_MESSAGE_TYPE 0x20
772#define REP88_ERROR_MESSAGE_MALFORMD 0x22
773#define REP88_ERROR_MESSAGE_LENGTH 0x23
774#define REP88_ERROR_RESERVED_FIELD 0x24
775#define REP88_ERROR_KEY_TYPE 0x34
776#define REP88_ERROR_INVALID_KEY 0x82
777#define REP88_ERROR_OPERAND 0x84
778#define REP88_ERROR_OPERAND_EVEN_MOD 0x85
779
682#define CALLER_HEADER 12 780#define CALLER_HEADER 12
683 781
684static inline int 782static inline int
@@ -687,7 +785,7 @@ testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat)
687 int ccode; 785 int ccode;
688 786
689 asm volatile 787 asm volatile
690#ifdef __s390x__ 788#ifdef CONFIG_64BIT
691 (" llgfr 0,%4 \n" 789 (" llgfr 0,%4 \n"
692 " slgr 1,1 \n" 790 " slgr 1,1 \n"
693 " lgr 2,1 \n" 791 " lgr 2,1 \n"
@@ -757,7 +855,7 @@ resetq(int q_nr, struct ap_status_word *stat_p)
757 int ccode; 855 int ccode;
758 856
759 asm volatile 857 asm volatile
760#ifdef __s390x__ 858#ifdef CONFIG_64BIT
761 (" llgfr 0,%2 \n" 859 (" llgfr 0,%2 \n"
762 " lghi 1,1 \n" 860 " lghi 1,1 \n"
763 " sll 1,24 \n" 861 " sll 1,24 \n"
@@ -823,7 +921,7 @@ sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat)
823 int ccode; 921 int ccode;
824 922
825 asm volatile 923 asm volatile
826#ifdef __s390x__ 924#ifdef CONFIG_64BIT
827 (" lgr 6,%3 \n" 925 (" lgr 6,%3 \n"
828 " llgfr 7,%2 \n" 926 " llgfr 7,%2 \n"
829 " llgt 0,0(6) \n" 927 " llgt 0,0(6) \n"
@@ -902,7 +1000,7 @@ rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id,
902 int ccode; 1000 int ccode;
903 1001
904 asm volatile 1002 asm volatile
905#ifdef __s390x__ 1003#ifdef CONFIG_64BIT
906 (" llgfr 0,%2 \n" 1004 (" llgfr 0,%2 \n"
907 " lgr 3,%4 \n" 1005 " lgr 3,%4 \n"
908 " lgr 6,%3 \n" 1006 " lgr 6,%3 \n"
@@ -1029,10 +1127,6 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1029 stat = HD_ONLINE; 1127 stat = HD_ONLINE;
1030 *q_depth = t_depth + 1; 1128 *q_depth = t_depth + 1;
1031 switch (t_dev_type) { 1129 switch (t_dev_type) {
1032 case OTHER_HW:
1033 stat = HD_NOT_THERE;
1034 *dev_type = NILDEV;
1035 break;
1036 case PCICA_HW: 1130 case PCICA_HW:
1037 *dev_type = PCICA; 1131 *dev_type = PCICA;
1038 break; 1132 break;
@@ -1045,6 +1139,9 @@ query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type)
1045 case CEX2C_HW: 1139 case CEX2C_HW:
1046 *dev_type = CEX2C; 1140 *dev_type = CEX2C;
1047 break; 1141 break;
1142 case CEX2A_HW:
1143 *dev_type = CEX2A;
1144 break;
1048 default: 1145 default:
1049 *dev_type = NILDEV; 1146 *dev_type = NILDEV;
1050 break; 1147 break;
@@ -2029,6 +2126,177 @@ ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx,
2029 return 0; 2126 return 0;
2030} 2127}
2031 2128
2129static int
2130ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p,
2131 union type50_msg *z90cMsg_p)
2132{
2133 int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len;
2134 unsigned char *mod_tgt, *exp_tgt, *inp_tgt;
2135 union type50_msg *tmp_type50_msg;
2136
2137 mod_len = icaMex_p->inputdatalength;
2138
2139 msg_size = ((mod_len <= 128) ? TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) +
2140 CALLER_HEADER;
2141
2142 memset(z90cMsg_p, 0, msg_size);
2143
2144 tmp_type50_msg = (union type50_msg *)
2145 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2146
2147 tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE;
2148
2149 if (mod_len <= 128) {
2150 tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN;
2151 tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT;
2152 mod_tgt = tmp_type50_msg->meb1.modulus;
2153 mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus);
2154 exp_tgt = tmp_type50_msg->meb1.exponent;
2155 exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent);
2156 inp_tgt = tmp_type50_msg->meb1.message;
2157 inp_tgt_len = sizeof(tmp_type50_msg->meb1.message);
2158 } else {
2159 tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN;
2160 tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT;
2161 mod_tgt = tmp_type50_msg->meb2.modulus;
2162 mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus);
2163 exp_tgt = tmp_type50_msg->meb2.exponent;
2164 exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent);
2165 inp_tgt = tmp_type50_msg->meb2.message;
2166 inp_tgt_len = sizeof(tmp_type50_msg->meb2.message);
2167 }
2168
2169 mod_tgt += (mod_tgt_len - mod_len);
2170 if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len))
2171 return SEN_RELEASED;
2172 if (is_empty(mod_tgt, mod_len))
2173 return SEN_USER_ERROR;
2174 exp_tgt += (exp_tgt_len - mod_len);
2175 if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len))
2176 return SEN_RELEASED;
2177 if (is_empty(exp_tgt, mod_len))
2178 return SEN_USER_ERROR;
2179 inp_tgt += (inp_tgt_len - mod_len);
2180 if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len))
2181 return SEN_RELEASED;
2182 if (is_empty(inp_tgt, mod_len))
2183 return SEN_USER_ERROR;
2184
2185 *z90cMsg_l_p = msg_size - CALLER_HEADER;
2186
2187 return 0;
2188}
2189
2190static int
2191ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p,
2192 int *z90cMsg_l_p, union type50_msg *z90cMsg_p)
2193{
2194 int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len,
2195 dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset;
2196 unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt,
2197 temp[8];
2198 union type50_msg *tmp_type50_msg;
2199
2200 mod_len = icaMsg_p->inputdatalength;
2201 short_len = mod_len / 2;
2202 long_len = mod_len / 2 + 8;
2203 long_offset = 0;
2204
2205 if (long_len > 128) {
2206 memset(temp, 0x00, sizeof(temp));
2207 if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128))
2208 return SEN_RELEASED;
2209 if (!is_empty(temp, 8))
2210 return SEN_NOT_AVAIL;
2211 if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128))
2212 return SEN_RELEASED;
2213 if (!is_empty(temp, 8))
2214 return SEN_NOT_AVAIL;
2215 if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128))
2216 return SEN_RELEASED;
2217 if (!is_empty(temp, 8))
2218 return SEN_NOT_AVAIL;
2219 long_offset = long_len - 128;
2220 long_len = 128;
2221 }
2222
2223 tmp_size = ((mod_len <= 128) ? TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) +
2224 CALLER_HEADER;
2225
2226 memset(z90cMsg_p, 0, tmp_size);
2227
2228 tmp_type50_msg = (union type50_msg *)
2229 ((unsigned char *) z90cMsg_p + CALLER_HEADER);
2230
2231 tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE;
2232 if (long_len <= 64) {
2233 tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN;
2234 tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT;
2235 p_tgt = tmp_type50_msg->crb1.p;
2236 p_tgt_len = sizeof(tmp_type50_msg->crb1.p);
2237 q_tgt = tmp_type50_msg->crb1.q;
2238 q_tgt_len = sizeof(tmp_type50_msg->crb1.q);
2239 dp_tgt = tmp_type50_msg->crb1.dp;
2240 dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp);
2241 dq_tgt = tmp_type50_msg->crb1.dq;
2242 dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq);
2243 u_tgt = tmp_type50_msg->crb1.u;
2244 u_tgt_len = sizeof(tmp_type50_msg->crb1.u);
2245 inp_tgt = tmp_type50_msg->crb1.message;
2246 inp_tgt_len = sizeof(tmp_type50_msg->crb1.message);
2247 } else {
2248 tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN;
2249 tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT;
2250 p_tgt = tmp_type50_msg->crb2.p;
2251 p_tgt_len = sizeof(tmp_type50_msg->crb2.p);
2252 q_tgt = tmp_type50_msg->crb2.q;
2253 q_tgt_len = sizeof(tmp_type50_msg->crb2.q);
2254 dp_tgt = tmp_type50_msg->crb2.dp;
2255 dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp);
2256 dq_tgt = tmp_type50_msg->crb2.dq;
2257 dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq);
2258 u_tgt = tmp_type50_msg->crb2.u;
2259 u_tgt_len = sizeof(tmp_type50_msg->crb2.u);
2260 inp_tgt = tmp_type50_msg->crb2.message;
2261 inp_tgt_len = sizeof(tmp_type50_msg->crb2.message);
2262 }
2263
2264 p_tgt += (p_tgt_len - long_len);
2265 if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len))
2266 return SEN_RELEASED;
2267 if (is_empty(p_tgt, long_len))
2268 return SEN_USER_ERROR;
2269 q_tgt += (q_tgt_len - short_len);
2270 if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len))
2271 return SEN_RELEASED;
2272 if (is_empty(q_tgt, short_len))
2273 return SEN_USER_ERROR;
2274 dp_tgt += (dp_tgt_len - long_len);
2275 if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len))
2276 return SEN_RELEASED;
2277 if (is_empty(dp_tgt, long_len))
2278 return SEN_USER_ERROR;
2279 dq_tgt += (dq_tgt_len - short_len);
2280 if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len))
2281 return SEN_RELEASED;
2282 if (is_empty(dq_tgt, short_len))
2283 return SEN_USER_ERROR;
2284 u_tgt += (u_tgt_len - long_len);
2285 if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len))
2286 return SEN_RELEASED;
2287 if (is_empty(u_tgt, long_len))
2288 return SEN_USER_ERROR;
2289 inp_tgt += (inp_tgt_len - mod_len);
2290 if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len))
2291 return SEN_RELEASED;
2292 if (is_empty(inp_tgt, mod_len))
2293 return SEN_USER_ERROR;
2294
2295 *z90cMsg_l_p = tmp_size - CALLER_HEADER;
2296
2297 return 0;
2298}
2299
2032int 2300int
2033convert_request(unsigned char *buffer, int func, unsigned short function, 2301convert_request(unsigned char *buffer, int func, unsigned short function,
2034 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) 2302 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p)
@@ -2071,6 +2339,16 @@ convert_request(unsigned char *buffer, int func, unsigned short function,
2071 cdx, msg_l_p, (struct type6_msg *) msg_p, 2339 cdx, msg_l_p, (struct type6_msg *) msg_p,
2072 dev_type); 2340 dev_type);
2073 } 2341 }
2342 if (dev_type == CEX2A) {
2343 if (func == ICARSACRT)
2344 return ICACRT_msg_to_type50CRT_msg(
2345 (struct ica_rsa_modexpo_crt *) buffer,
2346 msg_l_p, (union type50_msg *) msg_p);
2347 else
2348 return ICAMEX_msg_to_type50MEX_msg(
2349 (struct ica_rsa_modexpo *) buffer,
2350 msg_l_p, (union type50_msg *) msg_p);
2351 }
2074 2352
2075 return 0; 2353 return 0;
2076} 2354}
@@ -2081,8 +2359,8 @@ unset_ext_bitlens(void)
2081{ 2359{
2082 if (!ext_bitlens_msg_count) { 2360 if (!ext_bitlens_msg_count) {
2083 PRINTK("Unable to use coprocessors for extended bitlengths. " 2361 PRINTK("Unable to use coprocessors for extended bitlengths. "
2084 "Using PCICAs (if present) for extended bitlengths. " 2362 "Using PCICAs/CEX2As (if present) for extended "
2085 "This is not an error.\n"); 2363 "bitlengths. This is not an error.\n");
2086 ext_bitlens_msg_count++; 2364 ext_bitlens_msg_count++;
2087 } 2365 }
2088 ext_bitlens = 0; 2366 ext_bitlens = 0;
@@ -2094,6 +2372,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2094{ 2372{
2095 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; 2373 struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer;
2096 struct error_hdr *errh_p = (struct error_hdr *) response; 2374 struct error_hdr *errh_p = (struct error_hdr *) response;
2375 struct type80_hdr *t80h_p = (struct type80_hdr *) response;
2097 struct type84_hdr *t84h_p = (struct type84_hdr *) response; 2376 struct type84_hdr *t84h_p = (struct type84_hdr *) response;
2098 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; 2377 struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response;
2099 int reply_code, service_rc, service_rs, src_l; 2378 int reply_code, service_rc, service_rs, src_l;
@@ -2108,6 +2387,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2108 src_l = 0; 2387 src_l = 0;
2109 switch (errh_p->type) { 2388 switch (errh_p->type) {
2110 case TYPE82_RSP_CODE: 2389 case TYPE82_RSP_CODE:
2390 case TYPE88_RSP_CODE:
2111 reply_code = errh_p->reply_code; 2391 reply_code = errh_p->reply_code;
2112 src_p = (unsigned char *)errh_p; 2392 src_p = (unsigned char *)errh_p;
2113 PRINTK("Hardware error: Type %02X Message Header: " 2393 PRINTK("Hardware error: Type %02X Message Header: "
@@ -2116,6 +2396,10 @@ convert_response(unsigned char *response, unsigned char *buffer,
2116 src_p[0], src_p[1], src_p[2], src_p[3], 2396 src_p[0], src_p[1], src_p[2], src_p[3],
2117 src_p[4], src_p[5], src_p[6], src_p[7]); 2397 src_p[4], src_p[5], src_p[6], src_p[7]);
2118 break; 2398 break;
2399 case TYPE80_RSP_CODE:
2400 src_l = icaMsg_p->outputdatalength;
2401 src_p = response + (int)t80h_p->len - src_l;
2402 break;
2119 case TYPE84_RSP_CODE: 2403 case TYPE84_RSP_CODE:
2120 src_l = icaMsg_p->outputdatalength; 2404 src_l = icaMsg_p->outputdatalength;
2121 src_p = response + (int)t84h_p->len - src_l; 2405 src_p = response + (int)t84h_p->len - src_l;
@@ -2202,6 +2486,7 @@ convert_response(unsigned char *response, unsigned char *buffer,
2202 if (reply_code) 2486 if (reply_code)
2203 switch (reply_code) { 2487 switch (reply_code) {
2204 case REP82_ERROR_OPERAND_INVALID: 2488 case REP82_ERROR_OPERAND_INVALID:
2489 case REP88_ERROR_MESSAGE_MALFORMD:
2205 return REC_OPERAND_INV; 2490 return REC_OPERAND_INV;
2206 case REP82_ERROR_OPERAND_SIZE: 2491 case REP82_ERROR_OPERAND_SIZE:
2207 return REC_OPERAND_SIZE; 2492 return REC_OPERAND_SIZE;
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
index 4010f2bb85af..135ae04e6e75 100644
--- a/drivers/s390/crypto/z90main.c
+++ b/drivers/s390/crypto/z90main.c
@@ -34,7 +34,6 @@
34#include <linux/miscdevice.h> 34#include <linux/miscdevice.h>
35#include <linux/module.h> 35#include <linux/module.h>
36#include <linux/moduleparam.h> 36#include <linux/moduleparam.h>
37#include <linux/kobject_uevent.h>
38#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
39#include <linux/syscalls.h> 38#include <linux/syscalls.h>
40#include "z90crypt.h" 39#include "z90crypt.h"
@@ -229,7 +228,7 @@ struct device_x {
229 */ 228 */
230struct device { 229struct device {
231 int dev_type; // PCICA, PCICC, PCIXCC_MCL2, 230 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
232 // PCIXCC_MCL3, CEX2C 231 // PCIXCC_MCL3, CEX2C, CEX2A
233 enum devstat dev_stat; // current device status 232 enum devstat dev_stat; // current device status
234 int dev_self_x; // Index in array 233 int dev_self_x; // Index in array
235 int disabled; // Set when device is in error 234 int disabled; // Set when device is in error
@@ -296,26 +295,30 @@ struct caller {
296/** 295/**
297 * Function prototypes from z90hardware.c 296 * Function prototypes from z90hardware.c
298 */ 297 */
299enum hdstat query_online(int, int, int, int *, int *); 298enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
300enum devstat reset_device(int, int, int); 299 int *dev_type);
301enum devstat send_to_AP(int, int, int, unsigned char *); 300enum devstat reset_device(int deviceNr, int cdx, int resetNr);
302enum devstat receive_from_AP(int, int, int, unsigned char *, unsigned char *); 301enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
303int convert_request(unsigned char *, int, short, int, int, int *, 302enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
304 unsigned char *); 303 unsigned char *resp, unsigned char *psmid);
305int convert_response(unsigned char *, unsigned char *, int *, unsigned char *); 304int convert_request(unsigned char *buffer, int func, unsigned short function,
305 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
306int convert_response(unsigned char *response, unsigned char *buffer,
307 int *respbufflen_p, unsigned char *resp_buff);
306 308
307/** 309/**
308 * Low level function prototypes 310 * Low level function prototypes
309 */ 311 */
310static int create_z90crypt(int *); 312static int create_z90crypt(int *cdx_p);
311static int refresh_z90crypt(int *); 313static int refresh_z90crypt(int *cdx_p);
312static int find_crypto_devices(struct status *); 314static int find_crypto_devices(struct status *deviceMask);
313static int create_crypto_device(int); 315static int create_crypto_device(int index);
314static int destroy_crypto_device(int); 316static int destroy_crypto_device(int index);
315static void destroy_z90crypt(void); 317static void destroy_z90crypt(void);
316static int refresh_index_array(struct status *, struct device_x *); 318static int refresh_index_array(struct status *status_str,
317static int probe_device_type(struct device *); 319 struct device_x *index_array);
318static int probe_PCIXCC_type(struct device *); 320static int probe_device_type(struct device *devPtr);
321static int probe_PCIXCC_type(struct device *devPtr);
319 322
320/** 323/**
321 * proc fs definitions 324 * proc fs definitions
@@ -426,7 +429,7 @@ static struct miscdevice z90crypt_misc_device = {
426MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman" 429MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
427 "and Jochen Roehrig"); 430 "and Jochen Roehrig");
428MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " 431MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
429 "Copyright 2001, 2004 IBM Corporation"); 432 "Copyright 2001, 2005 IBM Corporation");
430MODULE_LICENSE("GPL"); 433MODULE_LICENSE("GPL");
431module_param(domain, int, 0); 434module_param(domain, int, 0);
432MODULE_PARM_DESC(domain, "domain index for device"); 435MODULE_PARM_DESC(domain, "domain index for device");
@@ -861,6 +864,12 @@ get_status_CEX2Ccount(void)
861} 864}
862 865
863static inline int 866static inline int
867get_status_CEX2Acount(void)
868{
869 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
870}
871
872static inline int
864get_status_requestq_count(void) 873get_status_requestq_count(void)
865{ 874{
866 return requestq_count; 875 return requestq_count;
@@ -1009,11 +1018,13 @@ static inline int
1009select_device_type(int *dev_type_p, int bytelength) 1018select_device_type(int *dev_type_p, int bytelength)
1010{ 1019{
1011 static int count = 0; 1020 static int count = 0;
1012 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, index_to_use; 1021 int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
1022 index_to_use;
1013 struct status *stat; 1023 struct status *stat;
1014 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && 1024 if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
1015 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && 1025 (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
1016 (*dev_type_p != CEX2C) && (*dev_type_p != ANYDEV)) 1026 (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
1027 (*dev_type_p != ANYDEV))
1017 return -1; 1028 return -1;
1018 if (*dev_type_p != ANYDEV) { 1029 if (*dev_type_p != ANYDEV) {
1019 stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; 1030 stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
@@ -1023,7 +1034,13 @@ select_device_type(int *dev_type_p, int bytelength)
1023 return -1; 1034 return -1;
1024 } 1035 }
1025 1036
1026 /* Assumption: PCICA, PCIXCC_MCL3, and CEX2C are all similar in speed */ 1037 /**
1038 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
1039 * speed.
1040 *
1041 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
1042 * other present.
1043 */
1027 stat = &z90crypt.hdware_info->type_mask[PCICA]; 1044 stat = &z90crypt.hdware_info->type_mask[PCICA];
1028 PCICA_avail = stat->st_count - 1045 PCICA_avail = stat->st_count -
1029 (stat->disabled_count + stat->user_disabled_count); 1046 (stat->disabled_count + stat->user_disabled_count);
@@ -1033,29 +1050,38 @@ select_device_type(int *dev_type_p, int bytelength)
1033 stat = &z90crypt.hdware_info->type_mask[CEX2C]; 1050 stat = &z90crypt.hdware_info->type_mask[CEX2C];
1034 CEX2C_avail = stat->st_count - 1051 CEX2C_avail = stat->st_count -
1035 (stat->disabled_count + stat->user_disabled_count); 1052 (stat->disabled_count + stat->user_disabled_count);
1036 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail) { 1053 stat = &z90crypt.hdware_info->type_mask[CEX2A];
1054 CEX2A_avail = stat->st_count -
1055 (stat->disabled_count + stat->user_disabled_count);
1056 if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
1037 /** 1057 /**
1038 * bitlength is a factor, PCICA is the most capable, even with 1058 * bitlength is a factor, PCICA or CEX2A are the most capable,
1039 * the new MCL for PCIXCC. 1059 * even with the new MCL for PCIXCC.
1040 */ 1060 */
1041 if ((bytelength < PCIXCC_MIN_MOD_SIZE) || 1061 if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
1042 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { 1062 (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
1043 if (!PCICA_avail) 1063 if (PCICA_avail) {
1044 return -1;
1045 else {
1046 *dev_type_p = PCICA; 1064 *dev_type_p = PCICA;
1047 return 0; 1065 return 0;
1048 } 1066 }
1067 if (CEX2A_avail) {
1068 *dev_type_p = CEX2A;
1069 return 0;
1070 }
1071 return -1;
1049 } 1072 }
1050 1073
1051 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + 1074 index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
1052 CEX2C_avail); 1075 CEX2C_avail + CEX2A_avail);
1053 if (index_to_use < PCICA_avail) 1076 if (index_to_use < PCICA_avail)
1054 *dev_type_p = PCICA; 1077 *dev_type_p = PCICA;
1055 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) 1078 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
1056 *dev_type_p = PCIXCC_MCL3; 1079 *dev_type_p = PCIXCC_MCL3;
1057 else 1080 else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
1081 CEX2C_avail))
1058 *dev_type_p = CEX2C; 1082 *dev_type_p = CEX2C;
1083 else
1084 *dev_type_p = CEX2A;
1059 count++; 1085 count++;
1060 return 0; 1086 return 0;
1061 } 1087 }
@@ -1360,7 +1386,7 @@ build_caller(struct work_element *we_p, short function)
1360 1386
1361 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && 1387 if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
1362 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1388 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1363 (we_p->devtype != CEX2C)) 1389 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
1364 return SEN_NOT_AVAIL; 1390 return SEN_NOT_AVAIL;
1365 1391
1366 memcpy(caller_p->caller_id, we_p->caller_id, 1392 memcpy(caller_p->caller_id, we_p->caller_id,
@@ -1429,7 +1455,8 @@ get_crypto_request_buffer(struct work_element *we_p)
1429 1455
1430 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && 1456 if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
1431 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && 1457 (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
1432 (we_p->devtype != CEX2C) && (we_p->devtype != ANYDEV)) { 1458 (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
1459 (we_p->devtype != ANYDEV)) {
1433 PRINTK("invalid device type\n"); 1460 PRINTK("invalid device type\n");
1434 return SEN_USER_ERROR; 1461 return SEN_USER_ERROR;
1435 } 1462 }
@@ -1504,8 +1531,9 @@ get_crypto_request_buffer(struct work_element *we_p)
1504 1531
1505 function = PCI_FUNC_KEY_ENCRYPT; 1532 function = PCI_FUNC_KEY_ENCRYPT;
1506 switch (we_p->devtype) { 1533 switch (we_p->devtype) {
1507 /* PCICA does everything with a simple RSA mod-expo operation */ 1534 /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
1508 case PCICA: 1535 case PCICA:
1536 case CEX2A:
1509 function = PCI_FUNC_KEY_ENCRYPT; 1537 function = PCI_FUNC_KEY_ENCRYPT;
1510 break; 1538 break;
1511 /** 1539 /**
@@ -1663,7 +1691,8 @@ z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
1663 * trigger a fallback to software. 1691 * trigger a fallback to software.
1664 */ 1692 */
1665 case -EINVAL: 1693 case -EINVAL:
1666 if (we_p->devtype != PCICA) 1694 if ((we_p->devtype != PCICA) &&
1695 (we_p->devtype != CEX2A))
1667 rv = -EGETBUFF; 1696 rv = -EGETBUFF;
1668 break; 1697 break;
1669 case -ETIMEOUT: 1698 case -ETIMEOUT:
@@ -1780,6 +1809,12 @@ z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1780 ret = -EFAULT; 1809 ret = -EFAULT;
1781 break; 1810 break;
1782 1811
1812 case Z90STAT_CEX2ACOUNT:
1813 tempstat = get_status_CEX2Acount();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1783 case Z90STAT_REQUESTQ_COUNT: 1818 case Z90STAT_REQUESTQ_COUNT:
1784 tempstat = get_status_requestq_count(); 1819 tempstat = get_status_requestq_count();
1785 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) 1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
@@ -2020,6 +2055,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2020 get_status_PCIXCCMCL3count()); 2055 get_status_PCIXCCMCL3count());
2021 len += sprintf(resp_buff+len, "CEX2C count: %d\n", 2056 len += sprintf(resp_buff+len, "CEX2C count: %d\n",
2022 get_status_CEX2Ccount()); 2057 get_status_CEX2Ccount());
2058 len += sprintf(resp_buff+len, "CEX2A count: %d\n",
2059 get_status_CEX2Acount());
2023 len += sprintf(resp_buff+len, "requestq count: %d\n", 2060 len += sprintf(resp_buff+len, "requestq count: %d\n",
2024 get_status_requestq_count()); 2061 get_status_requestq_count());
2025 len += sprintf(resp_buff+len, "pendingq count: %d\n", 2062 len += sprintf(resp_buff+len, "pendingq count: %d\n",
@@ -2027,8 +2064,8 @@ z90crypt_status(char *resp_buff, char **start, off_t offset,
2027 len += sprintf(resp_buff+len, "Total open handles: %d\n\n", 2064 len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
2028 get_status_totalopen_count()); 2065 get_status_totalopen_count());
2029 len += sprinthx( 2066 len += sprinthx(
2030 "Online devices: 1: PCICA, 2: PCICC, 3: PCIXCC (MCL2), " 2067 "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
2031 "4: PCIXCC (MCL3), 5: CEX2C", 2068 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
2032 resp_buff+len, 2069 resp_buff+len,
2033 get_status_status_mask(workarea), 2070 get_status_status_mask(workarea),
2034 Z90CRYPT_NUM_APS); 2071 Z90CRYPT_NUM_APS);
@@ -2141,6 +2178,7 @@ z90crypt_status_write(struct file *file, const char __user *buffer,
2141 case '3': // PCIXCC_MCL2 2178 case '3': // PCIXCC_MCL2
2142 case '4': // PCIXCC_MCL3 2179 case '4': // PCIXCC_MCL3
2143 case '5': // CEX2C 2180 case '5': // CEX2C
2181 case '6': // CEX2A
2144 j++; 2182 j++;
2145 break; 2183 break;
2146 case 'd': 2184 case 'd':
@@ -3008,7 +3046,9 @@ create_crypto_device(int index)
3008 z90crypt.hdware_info->device_type_array[index] = 4; 3046 z90crypt.hdware_info->device_type_array[index] = 4;
3009 else if (deviceType == CEX2C) 3047 else if (deviceType == CEX2C)
3010 z90crypt.hdware_info->device_type_array[index] = 5; 3048 z90crypt.hdware_info->device_type_array[index] = 5;
3011 else 3049 else if (deviceType == CEX2A)
3050 z90crypt.hdware_info->device_type_array[index] = 6;
3051 else // No idea how this would happen.
3012 z90crypt.hdware_info->device_type_array[index] = -1; 3052 z90crypt.hdware_info->device_type_array[index] = -1;
3013 } 3053 }
3014 3054
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index a7efc394515e..548854754921 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -1,5 +1,5 @@
1menu "S/390 network device drivers" 1menu "S/390 network device drivers"
2 depends on NETDEVICES && ARCH_S390 2 depends on NETDEVICES && S390
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b63d21612ec..e70af7f39946 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1603,7 +1603,7 @@ dumpit(char* buf, int len)
1603 __u32 ct, sw, rm, dup; 1603 __u32 ct, sw, rm, dup;
1604 char *ptr, *rptr; 1604 char *ptr, *rptr;
1605 char tbuf[82], tdup[82]; 1605 char tbuf[82], tdup[82];
1606#if (CONFIG_ARCH_S390X) 1606#if (CONFIG_64BIT)
1607 char addr[22]; 1607 char addr[22];
1608#else 1608#else
1609 char addr[12]; 1609 char addr[12];
@@ -1619,7 +1619,7 @@ dumpit(char* buf, int len)
1619 dup = 0; 1619 dup = 0;
1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { 1620 for ( ct=0; ct < len; ct++, ptr++, rptr++ ) {
1621 if (sw == 0) { 1621 if (sw == 0) {
1622#if (CONFIG_ARCH_S390X) 1622#if (CONFIG_64BIT)
1623 sprintf(addr, "%16.16lX",(unsigned long)rptr); 1623 sprintf(addr, "%16.16lX",(unsigned long)rptr);
1624#else 1624#else
1625 sprintf(addr, "%8.8X",(__u32)rptr); 1625 sprintf(addr, "%8.8X",(__u32)rptr);
@@ -1634,7 +1634,7 @@ dumpit(char* buf, int len)
1634 if (sw == 8) { 1634 if (sw == 8) {
1635 strcat(bhex, " "); 1635 strcat(bhex, " ");
1636 } 1636 }
1637#if (CONFIG_ARCH_S390X) 1637#if (CONFIG_64BIT)
1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); 1638 sprintf(tbuf,"%2.2lX", (unsigned long)*ptr);
1639#else 1639#else
1640 sprintf(tbuf,"%2.2X", (__u32)*ptr); 1640 sprintf(tbuf,"%2.2X", (__u32)*ptr);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index 0075894c71db..77dacb465732 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: cu3088.c,v 1.35 2005/03/30 19:28:52 richtera Exp $ 2 * $Id: cu3088.c,v 1.36 2005/10/25 14:37:17 cohuck Exp $
3 * 3 *
4 * CTC / LCS ccw_device driver 4 * CTC / LCS ccw_device driver
5 * 5 *
@@ -27,6 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/err.h> 28#include <linux/err.h>
29 29
30#include <asm/s390_rdev.h>
30#include <asm/ccwdev.h> 31#include <asm/ccwdev.h>
31#include <asm/ccwgroup.h> 32#include <asm/ccwgroup.h>
32 33
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
index df7647c3c100..ea8177392564 100644
--- a/drivers/s390/net/iucv.c
+++ b/drivers/s390/net/iucv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: iucv.c,v 1.45 2005/04/26 22:59:06 braunu Exp $ 2 * $Id: iucv.c,v 1.47 2005/11/21 11:35:22 mschwide Exp $
3 * 3 *
4 * IUCV network driver 4 * IUCV network driver
5 * 5 *
@@ -29,7 +29,7 @@
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 * 31 *
32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.45 $ 32 * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.47 $
33 * 33 *
34 */ 34 */
35 35
@@ -54,7 +54,7 @@
54#include <asm/s390_ext.h> 54#include <asm/s390_ext.h>
55#include <asm/ebcdic.h> 55#include <asm/ebcdic.h>
56#include <asm/smp.h> 56#include <asm/smp.h>
57#include <asm/ccwdev.h> //for root device stuff 57#include <asm/s390_rdev.h>
58 58
59/* FLAGS: 59/* FLAGS:
60 * All flags are defined in the field IPFLAGS1 of each function 60 * All flags are defined in the field IPFLAGS1 of each function
@@ -355,7 +355,7 @@ do { \
355static void 355static void
356iucv_banner(void) 356iucv_banner(void)
357{ 357{
358 char vbuf[] = "$Revision: 1.45 $"; 358 char vbuf[] = "$Revision: 1.47 $";
359 char *version = vbuf; 359 char *version = vbuf;
360 360
361 if ((version = strchr(version, ':'))) { 361 if ((version = strchr(version, ':'))) {
@@ -477,7 +477,7 @@ grab_param(void)
477 ptr++; 477 ptr++;
478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) 478 if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
479 ptr = iucv_param_pool; 479 ptr = iucv_param_pool;
480 } while (atomic_compare_and_swap(0, 1, &ptr->in_use)); 480 } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
481 hint = ptr - iucv_param_pool; 481 hint = ptr - iucv_param_pool;
482 482
483 memset(&ptr->param, 0, sizeof(ptr->param)); 483 memset(&ptr->param, 0, sizeof(ptr->param));
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index f8f55cc468ba..97f927c01a82 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -65,6 +65,7 @@
65#include <asm/timex.h> 65#include <asm/timex.h>
66#include <asm/semaphore.h> 66#include <asm/semaphore.h>
67#include <asm/uaccess.h> 67#include <asm/uaccess.h>
68#include <asm/s390_rdev.h>
68 69
69#include "qeth.h" 70#include "qeth.h"
70#include "qeth_mpc.h" 71#include "qeth_mpc.h"
@@ -1396,7 +1397,7 @@ qeth_idx_activate_get_answer(struct qeth_channel *channel,
1396 channel->ccw.cda = (__u32) __pa(iob->data); 1397 channel->ccw.cda = (__u32) __pa(iob->data);
1397 1398
1398 wait_event(card->wait_q, 1399 wait_event(card->wait_q,
1399 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1400 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1400 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1401 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1401 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1402 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1402 rc = ccw_device_start(channel->ccwdev, 1403 rc = ccw_device_start(channel->ccwdev,
@@ -1463,7 +1464,7 @@ qeth_idx_activate_channel(struct qeth_channel *channel,
1463 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); 1464 memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
1464 1465
1465 wait_event(card->wait_q, 1466 wait_event(card->wait_q,
1466 atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); 1467 atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
1467 QETH_DBF_TEXT(setup, 6, "noirqpnd"); 1468 QETH_DBF_TEXT(setup, 6, "noirqpnd");
1468 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); 1469 spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
1469 rc = ccw_device_start(channel->ccwdev, 1470 rc = ccw_device_start(channel->ccwdev,
@@ -1616,7 +1617,7 @@ qeth_issue_next_read(struct qeth_card *card)
1616 } 1617 }
1617 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); 1618 qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
1618 wait_event(card->wait_q, 1619 wait_event(card->wait_q,
1619 atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); 1620 atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
1620 QETH_DBF_TEXT(trace, 6, "noirqpnd"); 1621 QETH_DBF_TEXT(trace, 6, "noirqpnd");
1621 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, 1622 rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
1622 (addr_t) iob, 0, 0); 1623 (addr_t) iob, 0, 0);
@@ -1882,7 +1883,7 @@ qeth_send_control_data(struct qeth_card *card, int len,
1882 spin_unlock_irqrestore(&card->lock, flags); 1883 spin_unlock_irqrestore(&card->lock, flags);
1883 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); 1884 QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
1884 wait_event(card->wait_q, 1885 wait_event(card->wait_q,
1885 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1886 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1886 qeth_prepare_control_data(card, len, iob); 1887 qeth_prepare_control_data(card, len, iob);
1887 if (IS_IPA(iob->data)) 1888 if (IS_IPA(iob->data))
1888 timer.expires = jiffies + QETH_IPA_TIMEOUT; 1889 timer.expires = jiffies + QETH_IPA_TIMEOUT;
@@ -1924,7 +1925,7 @@ qeth_osn_send_control_data(struct qeth_card *card, int len,
1924 QETH_DBF_TEXT(trace, 5, "osndctrd"); 1925 QETH_DBF_TEXT(trace, 5, "osndctrd");
1925 1926
1926 wait_event(card->wait_q, 1927 wait_event(card->wait_q,
1927 atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); 1928 atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
1928 qeth_prepare_control_data(card, len, iob); 1929 qeth_prepare_control_data(card, len, iob);
1929 QETH_DBF_TEXT(trace, 6, "osnoirqp"); 1930 QETH_DBF_TEXT(trace, 6, "osnoirqp");
1930 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); 1931 spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
@@ -4236,9 +4237,8 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4236 QETH_DBF_TEXT(trace, 6, "dosndpfa"); 4237 QETH_DBF_TEXT(trace, 6, "dosndpfa");
4237 4238
4238 /* spin until we get the queue ... */ 4239 /* spin until we get the queue ... */
4239 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4240 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4240 QETH_OUT_Q_LOCKED, 4241 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4241 &queue->state));
4242 /* ... now we've got the queue */ 4242 /* ... now we've got the queue */
4243 index = queue->next_buf_to_fill; 4243 index = queue->next_buf_to_fill;
4244 buffer = &queue->bufs[queue->next_buf_to_fill]; 4244 buffer = &queue->bufs[queue->next_buf_to_fill];
@@ -4292,9 +4292,8 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4292 QETH_DBF_TEXT(trace, 6, "dosndpkt"); 4292 QETH_DBF_TEXT(trace, 6, "dosndpkt");
4293 4293
4294 /* spin until we get the queue ... */ 4294 /* spin until we get the queue ... */
4295 while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, 4295 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4296 QETH_OUT_Q_LOCKED, 4296 QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4297 &queue->state));
4298 start_index = queue->next_buf_to_fill; 4297 start_index = queue->next_buf_to_fill;
4299 buffer = &queue->bufs[queue->next_buf_to_fill]; 4298 buffer = &queue->bufs[queue->next_buf_to_fill];
4300 /* 4299 /*
diff --git a/drivers/s390/s390_rdev.c b/drivers/s390/s390_rdev.c
new file mode 100644
index 000000000000..566cc3d185b6
--- /dev/null
+++ b/drivers/s390/s390_rdev.c
@@ -0,0 +1,53 @@
1/*
2 * drivers/s390/s390_rdev.c
3 * s390 root device
4 * $Revision: 1.2 $
5 *
6 * Copyright (C) 2002, 2005 IBM Deutschland Entwicklung GmbH,
7 * IBM Corporation
8 * Author(s): Cornelia Huck (cohuck@de.ibm.com)
9 * Carsten Otte (cotte@de.ibm.com)
10 */
11
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/device.h>
15#include <asm/s390_rdev.h>
16
/* Release callback for root devices: the device was kmalloc'ed in
 * s390_root_dev_register(), so dropping the last reference simply
 * frees the memory. */
static void
s390_root_dev_release(struct device *dev)
{
	kfree(dev);
}
22
23struct device *
24s390_root_dev_register(const char *name)
25{
26 struct device *dev;
27 int ret;
28
29 if (!strlen(name))
30 return ERR_PTR(-EINVAL);
31 dev = kmalloc(sizeof(struct device), GFP_KERNEL);
32 if (!dev)
33 return ERR_PTR(-ENOMEM);
34 memset(dev, 0, sizeof(struct device));
35 strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
36 dev->release = s390_root_dev_release;
37 ret = device_register(dev);
38 if (ret) {
39 kfree(dev);
40 return ERR_PTR(ret);
41 }
42 return dev;
43}
44
/* Unregister a root device previously created by
 * s390_root_dev_register(); a NULL argument is silently ignored. */
void
s390_root_dev_unregister(struct device *dev)
{
	if (!dev)
		return;
	device_unregister(dev);
}

EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index 4191fd9d4d11..3bf466603512 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -23,7 +23,7 @@
23 23
24static struct semaphore m_sem; 24static struct semaphore m_sem;
25 25
26extern int css_process_crw(int); 26extern int css_process_crw(int, int);
27extern int chsc_process_crw(void); 27extern int chsc_process_crw(void);
28extern int chp_process_crw(int, int); 28extern int chp_process_crw(int, int);
29extern void css_reiterate_subchannels(void); 29extern void css_reiterate_subchannels(void);
@@ -49,9 +49,10 @@ s390_handle_damage(char *msg)
49static int 49static int
50s390_collect_crw_info(void *param) 50s390_collect_crw_info(void *param)
51{ 51{
52 struct crw crw; 52 struct crw crw[2];
53 int ccode, ret, slow; 53 int ccode, ret, slow;
54 struct semaphore *sem; 54 struct semaphore *sem;
55 unsigned int chain;
55 56
56 sem = (struct semaphore *)param; 57 sem = (struct semaphore *)param;
57 /* Set a nice name. */ 58 /* Set a nice name. */
@@ -59,25 +60,50 @@ s390_collect_crw_info(void *param)
59repeat: 60repeat:
60 down_interruptible(sem); 61 down_interruptible(sem);
61 slow = 0; 62 slow = 0;
63 chain = 0;
62 while (1) { 64 while (1) {
63 ccode = stcrw(&crw); 65 if (unlikely(chain > 1)) {
66 struct crw tmp_crw;
67
68 printk(KERN_WARNING"%s: Code does not support more "
69 "than two chained crws; please report to "
70 "linux390@de.ibm.com!\n", __FUNCTION__);
71 ccode = stcrw(&tmp_crw);
72 printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
73 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
74 __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
75 tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
76 tmp_crw.erc, tmp_crw.rsid);
77 printk(KERN_WARNING"%s: This was crw number %x in the "
78 "chain\n", __FUNCTION__, chain);
79 if (ccode != 0)
80 break;
81 chain = tmp_crw.chn ? chain + 1 : 0;
82 continue;
83 }
84 ccode = stcrw(&crw[chain]);
64 if (ccode != 0) 85 if (ccode != 0)
65 break; 86 break;
66 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " 87 DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
67 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 88 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
68 crw.slct, crw.oflw, crw.chn, crw.rsc, crw.anc, 89 crw[chain].slct, crw[chain].oflw, crw[chain].chn,
69 crw.erc, crw.rsid); 90 crw[chain].rsc, crw[chain].anc, crw[chain].erc,
91 crw[chain].rsid);
70 /* Check for overflows. */ 92 /* Check for overflows. */
71 if (crw.oflw) { 93 if (crw[chain].oflw) {
72 pr_debug("%s: crw overflow detected!\n", __FUNCTION__); 94 pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
73 css_reiterate_subchannels(); 95 css_reiterate_subchannels();
96 chain = 0;
74 slow = 1; 97 slow = 1;
75 continue; 98 continue;
76 } 99 }
77 switch (crw.rsc) { 100 switch (crw[chain].rsc) {
78 case CRW_RSC_SCH: 101 case CRW_RSC_SCH:
79 pr_debug("source is subchannel %04X\n", crw.rsid); 102 if (crw[0].chn && !chain)
80 ret = css_process_crw (crw.rsid); 103 break;
104 pr_debug("source is subchannel %04X\n", crw[0].rsid);
105 ret = css_process_crw (crw[0].rsid,
106 chain ? crw[1].rsid : 0);
81 if (ret == -EAGAIN) 107 if (ret == -EAGAIN)
82 slow = 1; 108 slow = 1;
83 break; 109 break;
@@ -85,18 +111,18 @@ repeat:
85 pr_debug("source is monitoring facility\n"); 111 pr_debug("source is monitoring facility\n");
86 break; 112 break;
87 case CRW_RSC_CPATH: 113 case CRW_RSC_CPATH:
88 pr_debug("source is channel path %02X\n", crw.rsid); 114 pr_debug("source is channel path %02X\n", crw[0].rsid);
89 switch (crw.erc) { 115 switch (crw[0].erc) {
90 case CRW_ERC_IPARM: /* Path has come. */ 116 case CRW_ERC_IPARM: /* Path has come. */
91 ret = chp_process_crw(crw.rsid, 1); 117 ret = chp_process_crw(crw[0].rsid, 1);
92 break; 118 break;
93 case CRW_ERC_PERRI: /* Path has gone. */ 119 case CRW_ERC_PERRI: /* Path has gone. */
94 case CRW_ERC_PERRN: 120 case CRW_ERC_PERRN:
95 ret = chp_process_crw(crw.rsid, 0); 121 ret = chp_process_crw(crw[0].rsid, 0);
96 break; 122 break;
97 default: 123 default:
98 pr_debug("Don't know how to handle erc=%x\n", 124 pr_debug("Don't know how to handle erc=%x\n",
99 crw.erc); 125 crw[0].erc);
100 ret = 0; 126 ret = 0;
101 } 127 }
102 if (ret == -EAGAIN) 128 if (ret == -EAGAIN)
@@ -115,6 +141,8 @@ repeat:
115 pr_debug("unknown source\n"); 141 pr_debug("unknown source\n");
116 break; 142 break;
117 } 143 }
144 /* chain is always 0 or 1 here. */
145 chain = crw[chain].chn ? chain + 1 : 0;
118 } 146 }
119 if (slow) 147 if (slow)
120 queue_work(slow_path_wq, &slow_path_work); 148 queue_work(slow_path_wq, &slow_path_work);
@@ -218,7 +246,7 @@ s390_revalidate_registers(struct mci *mci)
218 */ 246 */
219 kill_task = 1; 247 kill_task = 1;
220 248
221#ifndef __s390x__ 249#ifndef CONFIG_64BIT
222 asm volatile("ld 0,0(%0)\n" 250 asm volatile("ld 0,0(%0)\n"
223 "ld 2,8(%0)\n" 251 "ld 2,8(%0)\n"
224 "ld 4,16(%0)\n" 252 "ld 4,16(%0)\n"
@@ -227,7 +255,7 @@ s390_revalidate_registers(struct mci *mci)
227#endif 255#endif
228 256
229 if (MACHINE_HAS_IEEE) { 257 if (MACHINE_HAS_IEEE) {
230#ifdef __s390x__ 258#ifdef CONFIG_64BIT
231 fpt_save_area = &S390_lowcore.floating_pt_save_area; 259 fpt_save_area = &S390_lowcore.floating_pt_save_area;
232 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; 260 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
233#else 261#else
@@ -286,7 +314,7 @@ s390_revalidate_registers(struct mci *mci)
286 */ 314 */
287 s390_handle_damage("invalid control registers."); 315 s390_handle_damage("invalid control registers.");
288 else 316 else
289#ifdef __s390x__ 317#ifdef CONFIG_64BIT
290 asm volatile("lctlg 0,15,0(%0)" 318 asm volatile("lctlg 0,15,0(%0)"
291 : : "a" (&S390_lowcore.cregs_save_area)); 319 : : "a" (&S390_lowcore.cregs_save_area));
292#else 320#else
@@ -299,7 +327,7 @@ s390_revalidate_registers(struct mci *mci)
299 * can't write something sensible into that register. 327 * can't write something sensible into that register.
300 */ 328 */
301 329
302#ifdef __s390x__ 330#ifdef CONFIG_64BIT
303 /* 331 /*
304 * See if we can revalidate the TOD programmable register with its 332 * See if we can revalidate the TOD programmable register with its
305 * old contents (should be zero) otherwise set it to zero. 333 * old contents (should be zero) otherwise set it to zero.
@@ -356,7 +384,7 @@ s390_do_machine_check(struct pt_regs *regs)
356 if (mci->b) { 384 if (mci->b) {
357 /* Processing backup -> verify if we can survive this */ 385 /* Processing backup -> verify if we can survive this */
358 u64 z_mcic, o_mcic, t_mcic; 386 u64 z_mcic, o_mcic, t_mcic;
359#ifdef __s390x__ 387#ifdef CONFIG_64BIT
360 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); 388 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
361 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 | 389 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
362 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 390 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 87c2db1bd4f5..66da840c9316 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -106,7 +106,7 @@ static inline int stsi (void *sysinfo,
106{ 106{
107 int cc, retv; 107 int cc, retv;
108 108
109#ifndef CONFIG_ARCH_S390X 109#ifndef CONFIG_64BIT
110 __asm__ __volatile__ ( "lr\t0,%2\n" 110 __asm__ __volatile__ ( "lr\t0,%2\n"
111 "\tlr\t1,%3\n" 111 "\tlr\t1,%3\n"
112 "\tstsi\t0(%4)\n" 112 "\tstsi\t0(%4)\n"