Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/Kconfig              |   2
-rw-r--r--  drivers/s390/Kconfig                |   8
-rw-r--r--  drivers/s390/Makefile               |   2
-rw-r--r--  drivers/s390/block/dasd.c           |  33
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  |   5
-rw-r--r--  drivers/s390/block/dasd_devmap.c    |   6
-rw-r--r--  drivers/s390/block/dasd_diag.c      |   8
-rw-r--r--  drivers/s390/block/dasd_eckd.c      |  95
-rw-r--r--  drivers/s390/block/dasd_eer.c       |  24
-rw-r--r--  drivers/s390/block/dasd_erp.c       |  80
-rw-r--r--  drivers/s390/block/dasd_fba.c       |   4
-rw-r--r--  drivers/s390/block/dasd_genhd.c     |   2
-rw-r--r--  drivers/s390/block/dasd_int.h       |   1
-rw-r--r--  drivers/s390/block/dasd_proc.c      |   8
-rw-r--r--  drivers/s390/block/dcssblk.c        |   6
-rw-r--r--  drivers/s390/char/Makefile          |   4
-rw-r--r--  drivers/s390/char/con3215.c         |   2
-rw-r--r--  drivers/s390/char/con3270.c         |   3
-rw-r--r--  drivers/s390/char/defkeymap.c       |   2
-rw-r--r--  drivers/s390/char/fs3270.c          |   4
-rw-r--r--  drivers/s390/char/keyboard.c        |   2
-rw-r--r--  drivers/s390/char/monwriter.c       |   4
-rw-r--r--  drivers/s390/char/raw3270.c         |   4
-rw-r--r--  drivers/s390/char/sclp.c            |  93
-rw-r--r--  drivers/s390/char/sclp.h            |  18
-rw-r--r--  drivers/s390/char/sclp_con.c        |   2
-rw-r--r--  drivers/s390/char/sclp_cpi.c        |   2
-rw-r--r--  drivers/s390/char/sclp_info.c       |  57
-rw-r--r--  drivers/s390/char/sclp_rw.c         |   2
-rw-r--r--  drivers/s390/char/sclp_tty.c        |   2
-rw-r--r--  drivers/s390/char/sclp_vt220.c      |   4
-rw-r--r--  drivers/s390/char/tape.h            |  22
-rw-r--r--  drivers/s390/char/tape_3590.c       | 479
-rw-r--r--  drivers/s390/char/tape_3590.h       |  53
-rw-r--r--  drivers/s390/char/tape_block.c      |   4
-rw-r--r--  drivers/s390/char/tape_char.c       |  27
-rw-r--r--  drivers/s390/char/tape_core.c       |  69
-rw-r--r--  drivers/s390/char/tty3270.c         |  13
-rw-r--r--  drivers/s390/char/vmlogrdr.c        |   5
-rw-r--r--  drivers/s390/cio/blacklist.c        |  10
-rw-r--r--  drivers/s390/cio/ccwgroup.c         |   6
-rw-r--r--  drivers/s390/cio/chsc.c             | 270
-rw-r--r--  drivers/s390/cio/chsc.h             |  11
-rw-r--r--  drivers/s390/cio/cio.c              |  37
-rw-r--r--  drivers/s390/cio/cmf.c              |   4
-rw-r--r--  drivers/s390/cio/css.c              |  13
-rw-r--r--  drivers/s390/cio/css.h              |   2
-rw-r--r--  drivers/s390/cio/device.c           |  12
-rw-r--r--  drivers/s390/cio/device.h           |   2
-rw-r--r--  drivers/s390/cio/device_fsm.c       |   8
-rw-r--r--  drivers/s390/cio/device_ops.c       |   2
-rw-r--r--  drivers/s390/cio/device_status.c    |   8
-rw-r--r--  drivers/s390/cio/qdio.c             |  77
-rw-r--r--  drivers/s390/crypto/ap_bus.c        |   8
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c    |  20
-rw-r--r--  drivers/s390/crypto/zcrypt_pcica.c  |   8
-rw-r--r--  drivers/s390/crypto/zcrypt_pcixcc.c |   3
-rw-r--r--  drivers/s390/net/claw.c             |  16
-rw-r--r--  drivers/s390/net/ctcmain.c          |   8
-rw-r--r--  drivers/s390/net/cu3088.c           |   2
-rw-r--r--  drivers/s390/net/lcs.c              |   6
-rw-r--r--  drivers/s390/net/netiucv.c          |   4
-rw-r--r--  drivers/s390/net/qeth_eddp.c        |  28
-rw-r--r--  drivers/s390/net/qeth_main.c        |  92
-rw-r--r--  drivers/s390/net/qeth_sys.c         |  30
-rw-r--r--  drivers/s390/s390mach.c             |  37
-rw-r--r--  drivers/s390/s390mach.h             |   3
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c        |  25
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c        |  44
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c        |   7
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h        |   4
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c        |   2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c       |  38
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c       |  18
-rw-r--r--  drivers/s390/sysinfo.c              |  63
75 files changed, 1341 insertions, 748 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 879250d3d069..ff8c4beaace4 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA
 	  If unsure say M. The compiled module will be
 	  called padlock-sha.ko
 
+source "arch/s390/crypto/Kconfig"
+
 config CRYPTO_DEV_GEODE
 	tristate "Support for the Geode LX AES engine"
 	depends on CRYPTO && X86_32 && PCI
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig
index ae89b9b88743..165af398fdea 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/Kconfig
@@ -103,14 +103,8 @@ config CCW_CONSOLE
 	depends on TN3215_CONSOLE || TN3270_CONSOLE
 	default y
 
-config SCLP
-	bool "Support for SCLP"
-	help
-	  Include support for the SCLP interface to the service element.
-
 config SCLP_TTY
 	bool "Support for SCLP line mode terminal"
-	depends on SCLP
 	help
 	  Include support for IBM SCLP line-mode terminals.
 
@@ -123,7 +117,6 @@ config SCLP_CONSOLE
 
 config SCLP_VT220_TTY
 	bool "Support for SCLP VT220-compatible terminal"
-	depends on SCLP
 	help
 	  Include support for an IBM SCLP VT220-compatible terminal.
 
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE
 
 config SCLP_CPI
 	tristate "Control-Program Identification"
-	depends on SCLP
 	help
 	  This option enables the hardware console interface for system
 	  identification. This is commonly used for workload management and
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 9803c9352d78..5a888704a8d0 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the S/390 specific device drivers
 #
 
+CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
+
 obj-y += s390mach.o sysinfo.o s390_rdev.o
 obj-y += cio/ block/ char/ crypto/ net/ scsi/
 
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 492b68bcd7cc..eb5dc62f0d9c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -37,6 +37,7 @@
  */
 debug_info_t *dasd_debug_area;
 struct dasd_discipline *dasd_diag_discipline_pointer;
+void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 
 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device);
 static void dasd_setup_queue(struct dasd_device * device);
 static void dasd_free_queue(struct dasd_device * device);
 static void dasd_flush_request_queue(struct dasd_device *);
-static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
 static void do_kick_device(struct work_struct *);
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
 /*
  * Add profiling information for cqr before execution.
  */
-static inline void
+static void
 dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
 		   struct request *req)
 {
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
 /*
  * Add profiling information for cqr after execution.
  */
-static inline void
+static void
 dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
 		 struct request *req)
 {
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	    irb->scsw.cstat == 0 &&
 	    !irb->esw.esw0.erw.cons)
 		era = dasd_era_none;
-	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
-		era = dasd_era_fatal; /* don't recover this request */
 	else if (irb->esw.esw0.erw.cons)
 		era = device->discipline->examine_error(cqr, irb);
 	else
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
 /*
  * Process ccw request queue.
  */
-static inline void
+static void
 __dasd_process_ccw_queue(struct dasd_device * device,
 			 struct list_head *final_queue)
 {
@@ -1127,7 +1125,9 @@ restart:
 			cqr->status = DASD_CQR_FAILED;
 			cqr->stopclk = get_clock();
 		} else {
-			if (cqr->irb.esw.esw0.erw.cons) {
+			if (cqr->irb.esw.esw0.erw.cons &&
+			    test_bit(DASD_CQR_FLAGS_USE_ERP,
+				     &cqr->flags)) {
 				erp_fn = device->discipline->
 					erp_action(cqr);
 				erp_fn(cqr);
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
 /*
  * Fetch requests from the block device queue.
  */
-static inline void
+static void
 __dasd_process_blk_queue(struct dasd_device * device)
 {
 	request_queue_t *queue;
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device)
 		if (IS_ERR(cqr)) {
 			if (PTR_ERR(cqr) == -ENOMEM)
 				break;	/* terminate request queue loop */
+			if (PTR_ERR(cqr) == -EAGAIN) {
+				/*
+				 * The current request cannot be build right
+				 * now, we have to try later. If this request
+				 * is the head-of-queue we stop the device
+				 * for 1/2 second.
+				 */
+				if (!list_empty(&device->ccw_queue))
+					break;
+				device->stopped |= DASD_STOPPED_PENDING;
+				dasd_set_timer(device, HZ/2);
+				break;
+			}
 			DBF_DEV_EVENT(DBF_ERR, device,
 				      "CCW creation failed (rc=%ld) "
 				      "on request %p",
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
  * Take a look at the first request on the ccw queue and check
  * if it reached its expire time. If so, terminate the IO.
  */
-static inline void
+static void
 __dasd_check_expire(struct dasd_device * device)
 {
 	struct dasd_ccw_req *cqr;
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device)
  * Take a look at the first request on the ccw queue and check
  * if it needs to be started.
  */
-static inline void
+static void
 __dasd_start_head(struct dasd_device * device)
 {
 	struct dasd_ccw_req *cqr;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4d01040c2c63..8b9d68f6e016 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
 	/* log the erp chain if fatal error occurred */
 	if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
 		dasd_log_sense(cqr, irb);
-		dasd_log_ccw(cqr, 0, irb->scsw.cpa);
 	}
 
 	return era;
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 
 	struct dasd_ccw_req *erp = NULL;
 	struct dasd_device *device = cqr->device;
-	__u32 cpa = cqr->irb.scsw.cpa;
 	struct dasd_ccw_req *temp_erp = NULL;
 
 	if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 		}
 	}
 
-	if (erp->status == DASD_CQR_FAILED)
-		dasd_log_ccw(erp, 1, cpa);
-
 	/* enqueue added ERP request */
 	if (erp->status == DASD_CQR_FILLED) {
 		erp->status = DASD_CQR_QUEUED;
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 5943266152f5..ed70852cc915 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup);
 /*
  * Read a device busid/devno from a string.
  */
-static inline int
+static int
 dasd_busid(char **str, int *id0, int *id1, int *devno)
 {
 	int val, old_style;
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
  * only one: "ro" for read-only devices. The default feature set
  * is empty (value 0).
  */
-static inline int
+static int
 dasd_feature_list(char *str, char **endp)
 {
 	int features, len, rc;
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) {
 	return ERR_PTR(-EINVAL);
 }
 
-static inline char *
+static char *
 dasd_parse_next_element( char *parsestring ) {
 	char * residual_str;
 	residual_str = dasd_parse_keyword(parsestring);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 53db58a68617..ab782bb46ac1 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
 #define DIAG_MAX_RETRIES	32
 #define DIAG_TIMEOUT		50 * HZ
 
-struct dasd_discipline dasd_diag_discipline;
+static struct dasd_discipline dasd_diag_discipline;
 
 struct dasd_diag_private {
 	struct dasd_diag_characteristics rdc_data;
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd)
  * block offset. On success, return zero and set end_block to contain the
  * number of blocks on the device minus the specified offset. Return non-zero
  * otherwise. */
-static __inline__ int
+static inline int
 mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
 	     blocknum_t offset, blocknum_t *end_block)
 {
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
 
 /* Remove block I/O environment for device. Return zero on success, non-zero
  * otherwise. */
-static __inline__ int
+static inline int
 mdsk_term_io(struct dasd_device * device)
 {
 	struct dasd_diag_private *private;
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
 		    "dump sense not available for DIAG data");
 }
 
-struct dasd_discipline dasd_diag_discipline = {
+static struct dasd_discipline dasd_diag_discipline = {
 	.owner = THIS_MODULE,
 	.name = "DIAG",
 	.ebcname = "DIAG",
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index fdaa471e845f..cecab2274a6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2)
 	return (d1 + (d2 - 1)) / d2;
 }
 
-static inline int
-bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
-{
-	unsigned int fl1, fl2, int1, int2;
-	int bpr;
-
-	switch (rdc->formula) {
-	case 0x01:
-		fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
-		fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
-					ECKD_F1(rdc));
-		bpr = fl1 + fl2;
-		break;
-	case 0x02:
-		int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
-		int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
-		fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
-					ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
-					ECKD_F1(rdc));
-		fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
-					ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
-					ECKD_F1(rdc));
-		bpr = fl1 + fl2;
-		break;
-	default:
-		bpr = 0;
-		break;
-	}
-	return bpr;
-}
-
-static inline unsigned int
-bytes_per_track(struct dasd_eckd_characteristics *rdc)
-{
-	return *(unsigned int *) (rdc->byte_per_track) >> 8;
-}
-
-static inline unsigned int
+static unsigned int
 recs_per_track(struct dasd_eckd_characteristics * rdc,
 	       unsigned int kl, unsigned int dl)
 {
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc,
 	return 0;
 }
 
-static inline void
+static int
 check_XRC (struct ccw1         *de_ccw,
            struct DE_eckd_data *data,
            struct dasd_device  *device)
 {
 	struct dasd_eckd_private *private;
+	int rc;
 
 	private = (struct dasd_eckd_private *) device->private;
+	if (!private->rdc_data.facilities.XRC_supported)
+		return 0;
 
 	/* switch on System Time Stamp - needed for XRC Support */
-	if (private->rdc_data.facilities.XRC_supported) {
-
-		data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
-		data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
-
-		data->ep_sys_time = get_clock ();
-
-		de_ccw->count = sizeof (struct DE_eckd_data);
-		de_ccw->flags |= CCW_FLAG_SLI;
-	}
+	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
+	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
 
-	return;
+	rc = get_sync_clock(&data->ep_sys_time);
+	/* Ignore return code if sync clock is switched off. */
+	if (rc == -ENOSYS || rc == -EACCES)
+		rc = 0;
 
-} /* end check_XRC */
+	de_ccw->count = sizeof (struct DE_eckd_data);
+	de_ccw->flags |= CCW_FLAG_SLI;
+	return rc;
+}
 
-static inline void
+static int
 define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
 	      int totrk, int cmd, struct dasd_device * device)
 {
 	struct dasd_eckd_private *private;
 	struct ch_t geo, beg, end;
+	int rc = 0;
 
 	private = (struct dasd_eckd_private *) device->private;
 
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
 	case DASD_ECKD_CCW_WRITE_KD_MT:
 		data->mask.perm = 0x02;
 		data->attributes.operation = private->attrib.operation;
-		check_XRC (ccw, data, device);
+		rc = check_XRC (ccw, data, device);
 		break;
 	case DASD_ECKD_CCW_WRITE_CKD:
 	case DASD_ECKD_CCW_WRITE_CKD_MT:
 		data->attributes.operation = DASD_BYPASS_CACHE;
-		check_XRC (ccw, data, device);
+		rc = check_XRC (ccw, data, device);
 		break;
 	case DASD_ECKD_CCW_ERASE:
 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
 		data->mask.perm = 0x3;
 		data->mask.auth = 0x1;
 		data->attributes.operation = DASD_BYPASS_CACHE;
-		check_XRC (ccw, data, device);
+		rc = check_XRC (ccw, data, device);
 		break;
 	default:
 		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
 	data->beg_ext.head = beg.head;
 	data->end_ext.cyl = end.cyl;
 	data->end_ext.head = end.head;
+	return rc;
 }
 
-static inline void
+static void
 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
 	      int rec_on_trk, int no_rec, int cmd,
 	      struct dasd_device * device, int reclen)
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device)
 /*
  * Build CP for Perform Subsystem Function - SSC.
  */
-struct dasd_ccw_req *
+static struct dasd_ccw_req *
 dasd_eckd_build_psf_ssc(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
 		return cqr;
 	ccw = cqr->cpaddr;
 	/* First ccw is define extent. */
-	define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device);
+	if (define_extent(ccw++, cqr->data, first_trk,
+			  last_trk, cmd, device) == -EAGAIN) {
+		/* Clock not in sync and XRC is enabled. Try again later. */
+		dasd_sfree_request(cqr, device);
+		return ERR_PTR(-EAGAIN);
+	}
 	/* Build locate_record+read/write/ccws. */
 	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device)
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
-	cqr->retries = 0;
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device)
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
-	cqr->retries = 0;
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
 	cqr->device = device;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
-	cqr->retries = 0;
+	cqr->retries = 2;	/* set retry counter to enable basic ERP */
 	cqr->expires = 2 * HZ;
 	cqr->buildclk = get_clock();
 	cqr->status = DASD_CQR_FILLED;
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
  * Dump the range of CCWs into 'page' buffer
  * and return number of printed chars.
  */
-static inline int
+static int
 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
 {
 	int len, count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index e0bf30ebb215..6cedc914077e 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = {
 	.owner		= THIS_MODULE,
 };
 
-static struct miscdevice dasd_eer_dev = {
-	.minor	    = MISC_DYNAMIC_MINOR,
-	.name	    = "dasd_eer",
-	.fops	    = &dasd_eer_fops,
-};
+static struct miscdevice *dasd_eer_dev = NULL;
 
 int __init dasd_eer_init(void)
 {
 	int rc;
 
-	rc = misc_register(&dasd_eer_dev);
+	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
+	if (!dasd_eer_dev)
+		return -ENOMEM;
+
+	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
+	dasd_eer_dev->name = "dasd_eer";
+	dasd_eer_dev->fops = &dasd_eer_fops;
+
+	rc = misc_register(dasd_eer_dev);
 	if (rc) {
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
 		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
 			"register misc device");
 		return rc;
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void)
 
 void dasd_eer_exit(void)
 {
-	WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
+	if (dasd_eer_dev) {
+		WARN_ON(misc_deregister(dasd_eer_dev) != 0);
+		kfree(dasd_eer_dev);
+		dasd_eer_dev = NULL;
+	}
 }
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 58a65097922b..caa5d91420f8 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
 
 }				/* end default_erp_postaction */
 
-/*
- * Print the hex dump of the memory used by a request. This includes
- * all error recovery ccws that have been chained in from of the
- * real request.
- */
-static inline void
-hex_dump_memory(struct dasd_device *device, void *data, int len)
-{
-	int *pint;
-
-	pint = (int *) data;
-	while (len > 0) {
-		DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
-			    pint, pint[0], pint[1], pint[2], pint[3]);
-		pint += 4;
-		len -= 16;
-	}
-}
-
 void
 dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
 {
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
 	device->discipline->dump_sense(device, cqr, irb);
 }
 
-void
-dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
-{
-	struct dasd_device *device;
-	struct dasd_ccw_req *lcqr;
-	struct ccw1 *ccw;
-	int cplength;
-
-	device = cqr->device;
-	/* log the channel program */
-	for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
-		DEV_MESSAGE(KERN_ERR, device,
-			    "(%s) ERP chain report for req: %p",
-			    caller == 0 ? "EXAMINE" : "ACTION", lcqr);
-		hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
-
-		cplength = 1;
-		ccw = lcqr->cpaddr;
-		while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
-			cplength++;
-
-		if (cplength > 40) {	/* log only parts of the CP */
-			DEV_MESSAGE(KERN_ERR, device, "%s",
-				    "Start of channel program:");
-			hex_dump_memory(device, lcqr->cpaddr,
-					40*sizeof(struct ccw1));
-
-			DEV_MESSAGE(KERN_ERR, device, "%s",
-				    "End of channel program:");
-			hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
-					10*sizeof(struct ccw1));
-		} else {	/* log the whole CP */
-			DEV_MESSAGE(KERN_ERR, device, "%s",
-				    "Channel program (complete):");
-			hex_dump_memory(device, lcqr->cpaddr,
-					cplength*sizeof(struct ccw1));
-		}
-
-		if (lcqr != cqr)
-			continue;
-
-		/*
-		 * Log bytes arround failed CCW but only if we did
-		 * not log the whole CP of the CCW is outside the
-		 * logged CP.
-		 */
-		if (cplength > 40 ||
-		    ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
-		     (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
-
-			DEV_MESSAGE(KERN_ERR, device,
-				    "Failed CCW (%p) (area):",
-				    (void *) (long) cpa);
-			hex_dump_memory(device, cqr->cpaddr - 10,
-					20*sizeof(struct ccw1));
-		}
-	}
-
-} /* end log_erp_chain */
-
 EXPORT_SYMBOL(dasd_default_erp_action);
 EXPORT_SYMBOL(dasd_default_erp_postaction);
 EXPORT_SYMBOL(dasd_alloc_erp_request);
 EXPORT_SYMBOL(dasd_free_erp_request);
 EXPORT_SYMBOL(dasd_log_sense);
-EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index b857fd5893fd..be0909e39226 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = {
 	.notify      = dasd_generic_notify,
 };
 
-static inline void
+static void
 define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
 	      int blksize, int beg, int nr)
 {
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
 	data->ext_end = nr - 1;
 }
 
-static inline void
+static void
 locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
 	      int block_nr, int block_ct)
 {
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index d163632101d2..47ba4462708d 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device)
 	 */
 	memset(&bpart, 0, sizeof(struct blkpg_partition));
 	memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
-	barg.data = (void __user *) &bpart;
+	barg.data = (void __force __user *) &bpart;
 	barg.op = BLKPG_DEL_PARTITION;
 	for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
 		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index fb725e3b08fe..a2cc69e11410 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
 				       struct dasd_device *);
 void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
-void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
 
 /* externals in dasd_3370_erp.c */
 dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index bfa010f6dab2..8b7e11815d70 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL;
 static struct proc_dir_entry *dasd_devices_entry = NULL;
 static struct proc_dir_entry *dasd_statistics_entry = NULL;
 
-static inline char *
+static char *
 dasd_get_user_string(const char __user *user_buf, size_t user_len)
 {
 	char *buffer;
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = {
 	.release	= seq_release,
 };
 
-static inline int
+static int
 dasd_calc_metrics(char *page, char **start, off_t off,
 		  int count, int *eof, int len)
 {
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off,
 	return len;
 }
 
-static inline char *
-dasd_statistics_array(char *str, int *array, int shift)
+static char *
+dasd_statistics_array(char *str, unsigned int *array, int shift)
 {
 	int i;
 
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index be9b05347b4f..1340451ea408 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev)
  * device needs to be enqueued before the semaphore is
  * freed.
  */
-static inline int
+static int
 dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
 {
 	int minor, found;
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 				SEGMENT_SHARED);
 		if (rc < 0) {
 			BUG_ON(rc == -EINVAL);
-			if (rc == -EIO || rc == -ENOENT)
+			if (rc != -EAGAIN)
 				goto removeseg;
 		} else {
 			dev_info->is_shared = 1;
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
 				SEGMENT_EXCLUSIVE);
 		if (rc < 0) {
 			BUG_ON(rc == -EINVAL);
-			if (rc == -EIO || rc == -ENOENT)
+			if (rc != -EAGAIN)
 				goto removeseg;
 		} else {
 			dev_info->is_shared = 0;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index c3e97b4fc186..293e667b50f2 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,7 +2,8 @@
 # S/390 character devices
 #
 
-obj-y += ctrlchar.o keyboard.o defkeymap.o
+obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+	 sclp_info.o
 
 obj-$(CONFIG_TN3270) += raw3270.o
 obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o
 
 obj-$(CONFIG_TN3215) += con3215.o
 
-obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o
 obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
 obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
 obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 25b5d7a66417..9a328f14a641 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = {
  * 3215 tty registration code called from tty_init().
  * Most kernel services (incl. kmalloc) are available at this poimt.
  */
-int __init
+static int __init
 tty3215_init(void)
 {
 	struct tty_driver *driver;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 7566be890688..8e7f2d7633d6 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *);
 /*
  * Setup timeout for a device. On timeout trigger an update.
  */
-void
-con3270_set_timer(struct con3270 *cp, int expires)
+static void con3270_set_timer(struct con3270 *cp, int expires)
 {
 	if (expires == 0) {
 		if (timer_pending(&cp->timer))
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 17027d918cf7..564baca01b7c 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -5,6 +5,8 @@
 #include <linux/types.h>
 #include <linux/keyboard.h>
 #include <linux/kd.h>
+#include <linux/kbd_kern.h>
+#include <linux/kbd_diacr.h>
 
 u_short plain_map[NR_KEYS] = {
 	0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 0893d306ae80..e1a746269c4c 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -23,7 +23,7 @@
23#include "raw3270.h" 23#include "raw3270.h"
24#include "ctrlchar.h" 24#include "ctrlchar.h"
25 25
26struct raw3270_fn fs3270_fn; 26static struct raw3270_fn fs3270_fn;
27 27
28struct fs3270 { 28struct fs3270 {
29 struct raw3270_view view; 29 struct raw3270_view view;
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view)
401} 401}
402 402
403/* View to a 3270 device. Can be console, tty or fullscreen. */ 403/* View to a 3270 device. Can be console, tty or fullscreen. */
404struct raw3270_fn fs3270_fn = { 404static struct raw3270_fn fs3270_fn = {
405 .activate = fs3270_activate, 405 .activate = fs3270_activate,
406 .deactivate = fs3270_deactivate, 406 .deactivate = fs3270_deactivate,
407 .intv = (void *) fs3270_irq, 407 .intv = (void *) fs3270_irq,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 3e86fd1756e5..f62f9a4e8950 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
 	}
 }
 
+#if 0
 /*
  * Generate ebcdic -> ascii translation table from kbd_data.
  */
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
 		}
 	}
 }
+#endif
 
 /*
  * We have a combining character DIACR here, followed by the character CH.
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c
index cdb24f528112..9e451acc6491 100644
--- a/drivers/s390/char/monwriter.c
+++ b/drivers/s390/char/monwriter.c
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
 	return -EINVAL;
 }
 
-static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
-						struct monwrite_hdr *monhdr)
+static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
+					 struct monwrite_hdr *monhdr)
 {
 	struct mon_buf *entry, *next;
 
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7a84014f2037..8facd14adb7c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 
-struct class *class3270;
+static struct class *class3270;
 
 /* The main 3270 data structure. */
 struct raw3270 {
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
 /*
  * Encode array for 12 bit 3270 addresses.
  */
-unsigned char raw3270_ebcgraf[64] = {
+static unsigned char raw3270_ebcgraf[64] = {
 	0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
 	0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
 	0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 8a056df09d6b..f171de3b0b11 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t {
 /* Internal state: is a request active at the sclp? */
 static volatile enum sclp_running_state_t {
 	sclp_running_state_idle,
-	sclp_running_state_running
+	sclp_running_state_running,
+	sclp_running_state_reset_pending
 } sclp_running_state = sclp_running_state_idle;
 
 /* Internal state: is a read request pending? */
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t {
 
 /* Timeout intervals in seconds.*/
 #define SCLP_BUSY_INTERVAL	10
-#define SCLP_RETRY_INTERVAL	15
+#define SCLP_RETRY_INTERVAL	30
 
 static void sclp_process_queue(void);
 static int sclp_init_mask(int calculate);
 static int sclp_init(void);
 
 /* Perform service call. Return 0 on success, non-zero otherwise. */
-static int
-service_call(sclp_cmdw_t command, void *sccb)
+int
+sclp_service_call(sclp_cmdw_t command, void *sccb)
 {
 	int cc;
 
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb)
 	return 0;
 }
 
-/* Request timeout handler. Restart the request queue. If DATA is non-zero,
- * force restart of running request. */
+static inline void __sclp_make_read_req(void);
+
 static void
-sclp_request_timeout(unsigned long data)
+__sclp_queue_read_req(void)
 {
-	unsigned long flags;
-
-	if (data) {
-		spin_lock_irqsave(&sclp_lock, flags);
-		sclp_running_state = sclp_running_state_idle;
-		spin_unlock_irqrestore(&sclp_lock, flags);
+	if (sclp_reading_state == sclp_reading_state_idle) {
+		sclp_reading_state = sclp_reading_state_reading;
+		__sclp_make_read_req();
+		/* Add request to head of queue */
+		list_add(&sclp_read_req.list, &sclp_req_queue);
 	}
-	sclp_process_queue();
 }
 
 /* Set up request retry timer. Called while sclp_lock is locked. */
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
 	add_timer(&sclp_request_timer);
 }
 
+/* Request timeout handler. Restart the request queue. If DATA is non-zero,
+ * force restart of running request. */
+static void
+sclp_request_timeout(unsigned long data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	if (data) {
+		if (sclp_running_state == sclp_running_state_running) {
+			/* Break running state and queue NOP read event request
+			 * to get a defined interface state. */
+			__sclp_queue_read_req();
+			sclp_running_state = sclp_running_state_idle;
+		}
+	} else {
+		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+					 sclp_request_timeout, 0);
+	}
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_process_queue();
+}
+
 /* Try to start a request. Return zero if the request was successfully
  * started or if it will be started at a later time. Return non-zero otherwise.
  * Called while sclp_lock is locked. */
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req)
 	if (sclp_running_state != sclp_running_state_idle)
 		return 0;
 	del_timer(&sclp_request_timer);
-	rc = service_call(req->command, req->sccb);
+	rc = sclp_service_call(req->command, req->sccb);
 	req->start_count++;
 
 	if (rc == 0) {
@@ -191,7 +213,15 @@ sclp_process_queue(void)
 		rc = __sclp_start_request(req);
 		if (rc == 0)
 			break;
-		/* Request failed. */
+		/* Request failed */
+		if (req->start_count > 1) {
+			/* Cannot abort already submitted request - could still
+			 * be active at the SCLP */
+			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
+						 sclp_request_timeout, 0);
+			break;
+		}
+		/* Post-processing for aborted request */
 		list_del(&req->list);
 		if (req->callback) {
 			spin_unlock_irqrestore(&sclp_lock, flags);
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req)
 	list_add_tail(&req->list, &sclp_req_queue);
 	rc = 0;
 	/* Start if request is first in list */
-	if (req->list.prev == &sclp_req_queue) {
+	if (sclp_running_state == sclp_running_state_idle &&
+	    req->list.prev == &sclp_req_queue) {
 		rc = __sclp_start_request(req);
 		if (rc)
 			list_del(&req->list);
@@ -294,7 +325,7 @@ __sclp_make_read_req(void)
 	sccb = (struct sccb_header *) sclp_read_sccb;
 	clear_page(sccb);
 	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
-	sclp_read_req.command = SCLP_CMDW_READDATA;
+	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
 	sclp_read_req.status = SCLP_REQ_QUEUED;
 	sclp_read_req.start_count = 0;
 	sclp_read_req.callback = sclp_read_cb;
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code)
 	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
 	evbuf_pending = S390_lowcore.ext_params & 0x3;
 	if (finished_sccb) {
+		del_timer(&sclp_request_timer);
+		sclp_running_state = sclp_running_state_reset_pending;
 		req = __sclp_find_req(finished_sccb);
 		if (req) {
 			/* Request post-processing */
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code)
 		sclp_running_state = sclp_running_state_idle;
 	}
 	if (evbuf_pending && sclp_receive_mask != 0 &&
-	    sclp_reading_state == sclp_reading_state_idle &&
-	    sclp_activation_state == sclp_activation_state_active ) {
-		sclp_reading_state = sclp_reading_state_reading;
-		__sclp_make_read_req();
-		/* Add request to head of queue */
-		list_add(&sclp_read_req.list, &sclp_req_queue);
-	}
+	    sclp_activation_state == sclp_activation_state_active)
+		__sclp_queue_read_req();
 	spin_unlock(&sclp_lock);
 	sclp_process_queue();
 }
@@ -374,6 +402,7 @@ sclp_sync_wait(void)
 	unsigned long flags;
 	unsigned long cr0, cr0_sync;
 	u64 timeout;
+	int irq_context;
 
 	/* We'll be disabling timer interrupts, so we need a custom timeout
 	 * mechanism */
@@ -386,7 +415,9 @@ sclp_sync_wait(void)
 	}
 	local_irq_save(flags);
 	/* Prevent bottom half from executing once we force interrupts open */
-	local_bh_disable();
+	irq_context = in_interrupt();
+	if (!irq_context)
+		local_bh_disable();
 	/* Enable service-signal interruption, disable timer interrupts */
 	trace_hardirqs_on();
 	__ctl_store(cr0, 0, 0);
@@ -402,19 +433,19 @@ sclp_sync_wait(void)
 		    get_clock() > timeout &&
 		    del_timer(&sclp_request_timer))
 			sclp_request_timer.function(sclp_request_timer.data);
-		barrier();
 		cpu_relax();
 	}
 	local_irq_disable();
 	__ctl_load(cr0, 0, 0);
-	_local_bh_enable();
+	if (!irq_context)
+		_local_bh_enable();
 	local_irq_restore(flags);
 }
 
 EXPORT_SYMBOL(sclp_sync_wait);
 
 /* Dispatch changes in send and receive mask to registered listeners. */
-static inline void
+static void
 sclp_dispatch_state_change(void)
 {
 	struct list_head *l;
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask)
 	sccb = (struct init_sccb *) sclp_init_sccb;
 	clear_page(sccb);
 	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
-	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
+	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
 	sclp_init_req.status = SCLP_REQ_FILLED;
 	sclp_init_req.start_count = 0;
 	sclp_init_req.callback = NULL;
@@ -800,7 +831,7 @@ sclp_check_interface(void)
 	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
 		__sclp_make_init_req(0, 0);
 		sccb = (struct init_sccb *) sclp_init_req.sccb;
-		rc = service_call(sclp_init_req.command, sccb);
+		rc = sclp_service_call(sclp_init_req.command, sccb);
 		if (rc == -EIO)
 			break;
 		sclp_init_req.status = SCLP_REQ_RUNNING;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 2c71d6ee7b5b..7d29ab45a6ed 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -12,7 +12,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
-
+#include <asm/sclp.h>
 #include <asm/ebcdic.h>
 
 /* maximum number of pages concerning our own memory management */
@@ -49,9 +49,11 @@
 
 typedef unsigned int sclp_cmdw_t;
 
-#define SCLP_CMDW_READDATA	0x00770005
-#define SCLP_CMDW_WRITEDATA	0x00760005
-#define SCLP_CMDW_WRITEMASK	0x00780005
+#define SCLP_CMDW_READ_EVENT_DATA	0x00770005
+#define SCLP_CMDW_WRITE_EVENT_DATA	0x00760005
+#define SCLP_CMDW_WRITE_EVENT_MASK	0x00780005
+#define SCLP_CMDW_READ_SCP_INFO		0x00020001
+#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001
 
 #define GDS_ID_MDSMU		0x1310
 #define GDS_ID_MDSRouteInfo	0x1311
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t;
 
 typedef u32 sccb_mask_t;	/* ATTENTION: assumes 32bit mask !!! */
 
-struct sccb_header {
-	u16	length;
-	u8	function_code;
-	u8	control_mask[3];
-	u16	response_code;
-} __attribute__((packed));
-
 struct gds_subvector {
 	u8	length;
 	u8	key;
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg);
 int sclp_remove_processed(struct sccb_header *sccb);
 int sclp_deactivate(void);
 int sclp_reactivate(void);
+int sclp_service_call(sclp_cmdw_t command, void *sccb);
 
 /* useful inlines */
 
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 86864f641716..ead1043d788e 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); 66 } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback));
67} 67}
68 68
69static inline void 69static void
70sclp_conbuf_emit(void) 70sclp_conbuf_emit(void)
71{ 71{
72 struct sclp_buffer* buffer; 72 struct sclp_buffer* buffer;
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 4f873ae148b7..65aa2c85737f 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -169,7 +169,7 @@ cpi_prepare_req(void)
169 } 169 }
170 170
171 /* prepare request data structure presented to SCLP driver */ 171 /* prepare request data structure presented to SCLP driver */
172 req->command = SCLP_CMDW_WRITEDATA; 172 req->command = SCLP_CMDW_WRITE_EVENT_DATA;
173 req->sccb = sccb; 173 req->sccb = sccb;
174 req->status = SCLP_REQ_FILLED; 174 req->status = SCLP_REQ_FILLED;
175 req->callback = cpi_callback; 175 req->callback = cpi_callback;
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c
new file mode 100644
index 000000000000..7bcbe643b087
--- /dev/null
+++ b/drivers/s390/char/sclp_info.c
@@ -0,0 +1,57 @@
1/*
2 * drivers/s390/char/sclp_info.c
3 *
4 * Copyright IBM Corp. 2007
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#include <linux/init.h>
9#include <linux/errno.h>
10#include <linux/string.h>
11#include <asm/sclp.h>
12#include "sclp.h"
13
14struct sclp_readinfo_sccb s390_readinfo_sccb;
15
16void __init sclp_readinfo_early(void)
17{
18 sclp_cmdw_t command;
19 struct sccb_header *sccb;
20 int ret;
21
22 __ctl_set_bit(0, 9); /* enable service signal subclass mask */
23
24 sccb = &s390_readinfo_sccb.header;
25 command = SCLP_CMDW_READ_SCP_INFO_FORCED;
26 while (1) {
27 u16 response;
28
29 memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb));
30 sccb->length = sizeof(s390_readinfo_sccb);
31 sccb->control_mask[2] = 0x80;
32
33 ret = sclp_service_call(command, &s390_readinfo_sccb);
34
35 if (ret == -EIO)
36 goto out;
37 if (ret == -EBUSY)
38 continue;
39
40 __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
41 PSW_MASK_WAIT | PSW_DEFAULT_KEY);
42 local_irq_disable();
43 barrier();
44
45 response = sccb->response_code;
46
47 if (response == 0x10)
48 break;
49
50 if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO)
51 break;
52
53 command = SCLP_CMDW_READ_SCP_INFO;
54 }
55out:
56 __ctl_clear_bit(0, 9); /* disable service signal subclass mask */
57}
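
The new sclp_readinfo_early() above runs before the regular SCLP driver is set up, so it does the whole handshake by hand: enable the service-signal subclass in control register 0, ask for an interrupt via control_mask[2] = 0x80, issue the call, and sit in an enabled wait PSW until the service signal arrives. A minimal sketch of the retry decision it implements follows; the interpretation of the response codes (0x0010 = SCCB filled, 0x01f0 = command rejected) and the helper name are assumptions, only the SCLP_CMDW_* constants come from sclp.h in this patch.

#include <linux/types.h>
#include "sclp.h"	/* SCLP_CMDW_READ_SCP_INFO{,_FORCED} from this patch */

/*
 * Sketch only: start with the forced read and fall back to the plain
 * READ SCP INFO exactly once if the forced variant is rejected.
 * Returns the command to retry with, or 0 when the loop should stop.
 */
static u32 next_readinfo_command(u16 response, u32 current_command)
{
	if (response == 0x0010)		/* assumed: SCCB filled successfully */
		return 0;
	if (response == 0x01f0 &&	/* assumed: command not supported */
	    current_command == SCLP_CMDW_READ_SCP_INFO_FORCED)
		return SCLP_CMDW_READ_SCP_INFO;
	return 0;			/* anything else: give up */
}
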
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index 0c92d3909cca..2486783ea58e 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
460 sccb->msg_buf.header.type = EvTyp_PMsgCmd; 460 sccb->msg_buf.header.type = EvTyp_PMsgCmd;
461 else 461 else
462 return -ENOSYS; 462 return -ENOSYS;
463 buffer->request.command = SCLP_CMDW_WRITEDATA; 463 buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
464 buffer->request.status = SCLP_REQ_FILLED; 464 buffer->request.status = SCLP_REQ_FILLED;
465 buffer->request.callback = sclp_writedata_callback; 465 buffer->request.callback = sclp_writedata_callback;
466 buffer->request.callback_data = buffer; 466 buffer->request.callback_data = buffer;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 2d173e5c8a09..90536f60bf50 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = {
721 .ioctl = sclp_tty_ioctl, 721 .ioctl = sclp_tty_ioctl,
722}; 722};
723 723
724int __init 724static int __init
725sclp_tty_init(void) 725sclp_tty_init(void)
726{ 726{
727 struct tty_driver *driver; 727 struct tty_driver *driver;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 723bf4191bfe..544f137d70d7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request)
207 request->sclp_req.status = SCLP_REQ_FAILED; 207 request->sclp_req.status = SCLP_REQ_FAILED;
208 return -EIO; 208 return -EIO;
209 } 209 }
210 request->sclp_req.command = SCLP_CMDW_WRITEDATA; 210 request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 request->sclp_req.status = SCLP_REQ_FILLED; 211 request->sclp_req.status = SCLP_REQ_FILLED;
212 request->sclp_req.callback = sclp_vt220_callback; 212 request->sclp_req.callback = sclp_vt220_callback;
213 request->sclp_req.callback_data = (void *) request; 213 request->sclp_req.callback_data = (void *) request;
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = {
669/* 669/*
670 * Register driver with SCLP and Linux and initialize internal tty structures. 670 * Register driver with SCLP and Linux and initialize internal tty structures.
671 */ 671 */
672int __init 672static int __init
673sclp_vt220_tty_init(void) 673sclp_vt220_tty_init(void)
674{ 674{
675 struct tty_driver *driver; 675 struct tty_driver *driver;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index c9f1c4c8bb13..bb4ff537729d 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -3,7 +3,7 @@
3 * tape device driver for 3480/3490E/3590 tapes. 3 * tape device driver for 3480/3490E/3590 tapes.
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 8 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
9 * Martin Schwidefsky <schwidefsky@de.ibm.com> 9 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -99,7 +99,11 @@ enum tape_op {
99 TO_DIS, /* Tape display */ 99 TO_DIS, /* Tape display */
100 TO_ASSIGN, /* Assign tape to channel path */ 100 TO_ASSIGN, /* Assign tape to channel path */
101 TO_UNASSIGN, /* Unassign tape from channel path */ 101 TO_UNASSIGN, /* Unassign tape from channel path */
 102 TO_SIZE /* #entries in tape_op_t */ 102 TO_CRYPT_ON, /* Enable encryption */
 103 TO_CRYPT_OFF, /* Disable encryption */
104 TO_KEKL_SET, /* Set KEK label */
105 TO_KEKL_QUERY, /* Query KEK label */
106 TO_SIZE, /* #entries in tape_op_t */
103}; 107};
104 108
105/* Forward declaration */ 109/* Forward declaration */
@@ -112,6 +116,7 @@ enum tape_request_status {
112 TAPE_REQUEST_IN_IO, /* request is currently in IO */ 116 TAPE_REQUEST_IN_IO, /* request is currently in IO */
113 TAPE_REQUEST_DONE, /* request is completed. */ 117 TAPE_REQUEST_DONE, /* request is completed. */
114 TAPE_REQUEST_CANCEL, /* request should be canceled. */ 118 TAPE_REQUEST_CANCEL, /* request should be canceled. */
119 TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
115}; 120};
116 121
117/* Tape CCW request */ 122/* Tape CCW request */
@@ -164,10 +169,11 @@ struct tape_discipline {
164 * The discipline irq function either returns an error code (<0) which 169 * The discipline irq function either returns an error code (<0) which
165 * means that the request has failed with an error or one of the following: 170 * means that the request has failed with an error or one of the following:
166 */ 171 */
167#define TAPE_IO_SUCCESS 0 /* request successful */ 172#define TAPE_IO_SUCCESS 0 /* request successful */
168#define TAPE_IO_PENDING 1 /* request still running */ 173#define TAPE_IO_PENDING 1 /* request still running */
169#define TAPE_IO_RETRY 2 /* retry to current request */ 174#define TAPE_IO_RETRY 2 /* retry to current request */
170#define TAPE_IO_STOP 3 /* stop the running request */ 175#define TAPE_IO_STOP 3 /* stop the running request */
176#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
171 177
172/* Char Frontend Data */ 178/* Char Frontend Data */
173struct tape_char_data { 179struct tape_char_data {
@@ -242,6 +248,10 @@ struct tape_device {
242 248
243 /* Function to start or stop the next request later. */ 249 /* Function to start or stop the next request later. */
244 struct delayed_work tape_dnr; 250 struct delayed_work tape_dnr;
251
252 /* Timer for long busy */
253 struct timer_list lb_timeout;
254
245}; 255};
246 256
247/* Externals from tape_core.c */ 257/* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 9df912f63188..50f5edab83d7 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.c 2 * drivers/s390/char/tape_3590.c
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/bio.h> 13#include <linux/bio.h>
14#include <asm/ebcdic.h>
14 15
15#define TAPE_DBF_AREA tape_3590_dbf 16#define TAPE_DBF_AREA tape_3590_dbf
16 17
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA);
30 * - Read Device (buffered) log: BRA 31 * - Read Device (buffered) log: BRA
31 * - Read Library log: BRA 32 * - Read Library log: BRA
32 * - Swap Devices: BRA 33 * - Swap Devices: BRA
33 * - Long Busy: BRA 34 * - Long Busy: implemented
34 * - Special Intercept: BRA 35 * - Special Intercept: BRA
35 * - Read Alternate: implemented 36 * - Read Alternate: implemented
36 *******************************************************************/ 37 *******************************************************************/
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = {
94 [0xae] = "Subsystem environmental alert", 95 [0xae] = "Subsystem environmental alert",
95}; 96};
96 97
98static int crypt_supported(struct tape_device *device)
99{
100 return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device));
101}
102
103static int crypt_enabled(struct tape_device *device)
104{
105 return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device));
106}
107
108static void ext_to_int_kekl(struct tape390_kekl *in,
109 struct tape3592_kekl *out)
110{
111 int i;
112
113 memset(out, 0, sizeof(*out));
114 if (in->type == TAPE390_KEKL_TYPE_HASH)
115 out->flags |= 0x40;
116 if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH)
117 out->flags |= 0x80;
118 strncpy(out->label, in->label, 64);
119 for (i = strlen(in->label); i < sizeof(out->label); i++)
120 out->label[i] = ' ';
121 ASCEBC(out->label, sizeof(out->label));
122}
123
124static void int_to_ext_kekl(struct tape3592_kekl *in,
125 struct tape390_kekl *out)
126{
127 memset(out, 0, sizeof(*out));
128 if(in->flags & 0x40)
129 out->type = TAPE390_KEKL_TYPE_HASH;
130 else
131 out->type = TAPE390_KEKL_TYPE_LABEL;
132 if(in->flags & 0x80)
133 out->type_on_tape = TAPE390_KEKL_TYPE_HASH;
134 else
135 out->type_on_tape = TAPE390_KEKL_TYPE_LABEL;
136 memcpy(out->label, in->label, sizeof(in->label));
137 EBCASC(out->label, sizeof(in->label));
138 strstrip(out->label);
139}
140
141static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in,
142 struct tape390_kekl_pair *out)
143{
144 if (in->count == 0) {
145 out->kekl[0].type = TAPE390_KEKL_TYPE_NONE;
146 out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE;
147 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
148 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
149 } else if (in->count == 1) {
150 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
151 out->kekl[1].type = TAPE390_KEKL_TYPE_NONE;
152 out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE;
153 } else if (in->count == 2) {
154 int_to_ext_kekl(&in->kekl[0], &out->kekl[0]);
155 int_to_ext_kekl(&in->kekl[1], &out->kekl[1]);
156 } else {
157 printk("Invalid KEKL number: %d\n", in->count);
158 BUG();
159 }
160}
161
162static int check_ext_kekl(struct tape390_kekl *kekl)
163{
164 if (kekl->type == TAPE390_KEKL_TYPE_NONE)
165 goto invalid;
166 if (kekl->type > TAPE390_KEKL_TYPE_HASH)
167 goto invalid;
168 if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE)
169 goto invalid;
170 if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH)
171 goto invalid;
172 if ((kekl->type == TAPE390_KEKL_TYPE_HASH) &&
173 (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL))
174 goto invalid;
175
176 return 0;
177invalid:
178 return -EINVAL;
179}
180
181static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls)
182{
183 if (check_ext_kekl(&kekls->kekl[0]))
184 goto invalid;
185 if (check_ext_kekl(&kekls->kekl[1]))
186 goto invalid;
187
188 return 0;
189invalid:
190 return -EINVAL;
191}
192
193/*
194 * Query KEKLs
195 */
196static int tape_3592_kekl_query(struct tape_device *device,
197 struct tape390_kekl_pair *ext_kekls)
198{
199 struct tape_request *request;
200 struct tape3592_kekl_query_order *order;
201 struct tape3592_kekl_query_data *int_kekls;
202 int rc;
203
204 DBF_EVENT(6, "tape3592_kekl_query\n");
205 int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA);
206 if (!int_kekls)
207 return -ENOMEM;
208 request = tape_alloc_request(2, sizeof(*order));
209 if (IS_ERR(request)) {
210 rc = PTR_ERR(request);
211 goto fail_malloc;
212 }
213 order = request->cpdata;
214 memset(order,0,sizeof(*order));
215 order->code = 0xe2;
216 order->max_count = 2;
217 request->op = TO_KEKL_QUERY;
218 tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
219 tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls),
220 int_kekls);
221 rc = tape_do_io(device, request);
222 if (rc)
223 goto fail_request;
224 int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls);
225
226 rc = 0;
227fail_request:
228 tape_free_request(request);
229fail_malloc:
230 kfree(int_kekls);
231 return rc;
232}
233
234/*
235 * IOCTL: Query KEKLs
236 */
237static int tape_3592_ioctl_kekl_query(struct tape_device *device,
238 unsigned long arg)
239{
240 int rc;
241 struct tape390_kekl_pair *ext_kekls;
242
243 DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n");
244 if (!crypt_supported(device))
245 return -ENOSYS;
246 if (!crypt_enabled(device))
247 return -EUNATCH;
248 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
249 if (!ext_kekls)
250 return -ENOMEM;
251 rc = tape_3592_kekl_query(device, ext_kekls);
252 if (rc != 0)
253 goto fail;
254 if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) {
255 rc = -EFAULT;
256 goto fail;
257 }
258 rc = 0;
259fail:
260 kfree(ext_kekls);
261 return rc;
262}
263
264static int tape_3590_mttell(struct tape_device *device, int mt_count);
265
266/*
267 * Set KEKLs
268 */
269static int tape_3592_kekl_set(struct tape_device *device,
270 struct tape390_kekl_pair *ext_kekls)
271{
272 struct tape_request *request;
273 struct tape3592_kekl_set_order *order;
274
275 DBF_EVENT(6, "tape3592_kekl_set\n");
276 if (check_ext_kekl_pair(ext_kekls)) {
277 DBF_EVENT(6, "invalid kekls\n");
278 return -EINVAL;
279 }
280 if (tape_3590_mttell(device, 0) != 0)
281 return -EBADSLT;
282 request = tape_alloc_request(1, sizeof(*order));
283 if (IS_ERR(request))
284 return PTR_ERR(request);
285 order = request->cpdata;
286 memset(order, 0, sizeof(*order));
287 order->code = 0xe3;
288 order->kekls.count = 2;
289 ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]);
290 ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]);
291 request->op = TO_KEKL_SET;
292 tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order);
293
294 return tape_do_io_free(device, request);
295}
296
297/*
298 * IOCTL: Set KEKLs
299 */
300static int tape_3592_ioctl_kekl_set(struct tape_device *device,
301 unsigned long arg)
302{
303 int rc;
304 struct tape390_kekl_pair *ext_kekls;
305
306 DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n");
307 if (!crypt_supported(device))
308 return -ENOSYS;
309 if (!crypt_enabled(device))
310 return -EUNATCH;
311 ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL);
312 if (!ext_kekls)
313 return -ENOMEM;
314 if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) {
315 rc = -EFAULT;
316 goto out;
317 }
318 rc = tape_3592_kekl_set(device, ext_kekls);
319out:
320 kfree(ext_kekls);
321 return rc;
322}
323
324/*
325 * Enable encryption
326 */
327static int tape_3592_enable_crypt(struct tape_device *device)
328{
329 struct tape_request *request;
330 char *data;
331
332 DBF_EVENT(6, "tape_3592_enable_crypt\n");
333 if (!crypt_supported(device))
334 return -ENOSYS;
335 request = tape_alloc_request(2, 72);
336 if (IS_ERR(request))
337 return PTR_ERR(request);
338 data = request->cpdata;
339 memset(data,0,72);
340
341 data[0] = 0x05;
342 data[36 + 0] = 0x03;
343 data[36 + 1] = 0x03;
344 data[36 + 4] = 0x40;
345 data[36 + 6] = 0x01;
346 data[36 + 14] = 0x2f;
347 data[36 + 18] = 0xc3;
348 data[36 + 35] = 0x72;
349 request->op = TO_CRYPT_ON;
350 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
351 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
352 return tape_do_io_free(device, request);
353}
354
355/*
356 * Disable encryption
357 */
358static int tape_3592_disable_crypt(struct tape_device *device)
359{
360 struct tape_request *request;
361 char *data;
362
363 DBF_EVENT(6, "tape_3592_disable_crypt\n");
364 if (!crypt_supported(device))
365 return -ENOSYS;
366 request = tape_alloc_request(2, 72);
367 if (IS_ERR(request))
368 return PTR_ERR(request);
369 data = request->cpdata;
370 memset(data,0,72);
371
372 data[0] = 0x05;
373 data[36 + 0] = 0x03;
374 data[36 + 1] = 0x03;
375 data[36 + 35] = 0x32;
376
377 request->op = TO_CRYPT_OFF;
378 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
379 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
380
381 return tape_do_io_free(device, request);
382}
383
384/*
385 * IOCTL: Set encryption status
386 */
387static int tape_3592_ioctl_crypt_set(struct tape_device *device,
388 unsigned long arg)
389{
390 struct tape390_crypt_info info;
391
392 DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n");
393 if (!crypt_supported(device))
394 return -ENOSYS;
395 if (copy_from_user(&info, (char __user *)arg, sizeof(info)))
396 return -EFAULT;
397 if (info.status & ~TAPE390_CRYPT_ON_MASK)
398 return -EINVAL;
399 if (info.status & TAPE390_CRYPT_ON_MASK)
400 return tape_3592_enable_crypt(device);
401 else
402 return tape_3592_disable_crypt(device);
403}
404
405static int tape_3590_sense_medium(struct tape_device *device);
406
407/*
 408 * IOCTL: Query encryption status
409 */
410static int tape_3592_ioctl_crypt_query(struct tape_device *device,
411 unsigned long arg)
412{
413 DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n");
414 if (!crypt_supported(device))
415 return -ENOSYS;
416 tape_3590_sense_medium(device);
417 if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device),
418 sizeof(TAPE_3590_CRYPT_INFO(device))))
419 return -EFAULT;
420 else
421 return 0;
422}
423
97/* 424/*
98 * 3590 IOCTL Overload 425 * 3590 IOCTL Overload
99 */ 426 */
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
109 436
110 return tape_std_display(device, &disp); 437 return tape_std_display(device, &disp);
111 } 438 }
439 case TAPE390_KEKL_SET:
440 return tape_3592_ioctl_kekl_set(device, arg);
441 case TAPE390_KEKL_QUERY:
442 return tape_3592_ioctl_kekl_query(device, arg);
443 case TAPE390_CRYPT_SET:
444 return tape_3592_ioctl_crypt_set(device, arg);
445 case TAPE390_CRYPT_QUERY:
446 return tape_3592_ioctl_crypt_query(device, arg);
112 default: 447 default:
113 return -EINVAL; /* no additional ioctls */ 448 return -EINVAL; /* no additional ioctls */
114 } 449 }
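
The four new cases wire the 3592 crypto support into the existing 3590 ioctl handler. A hedged user-space sketch of driving TAPE390_CRYPT_QUERY follows; the ioctl name and the tape390_crypt_info fields (status, medium_status) appear in this patch, while the header location <asm/tape390.h> and the device node /dev/ntibm0 are assumptions for illustration only.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* assumed home of TAPE390_* and struct tape390_crypt_info */

int main(void)
{
	struct tape390_crypt_info info;
	int fd;

	fd = open("/dev/ntibm0", O_RDONLY);	/* hypothetical tape node */
	if (fd < 0)
		return 1;
	if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) == 0)
		printf("crypt status 0x%x, medium status 0x%x\n",
		       (unsigned int) info.status,
		       (unsigned int) info.medium_status);
	close(fd);
	return 0;
}
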
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work)
248 case TO_READ_ATTMSG: 583 case TO_READ_ATTMSG:
249 tape_3590_read_attmsg(p->device); 584 tape_3590_read_attmsg(p->device);
250 break; 585 break;
586 case TO_CRYPT_ON:
587 tape_3592_enable_crypt(p->device);
588 break;
589 case TO_CRYPT_OFF:
590 tape_3592_disable_crypt(p->device);
591 break;
251 default: 592 default:
252 DBF_EVENT(3, "T3590: work handler undefined for " 593 DBF_EVENT(3, "T3590: work handler undefined for "
253 "operation 0x%02x\n", p->op); 594 "operation 0x%02x\n", p->op);
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
365} 706}
366#endif 707#endif
367 708
709static void tape_3590_med_state_set(struct tape_device *device,
710 struct tape_3590_med_sense *sense)
711{
712 struct tape390_crypt_info *c_info;
713
714 c_info = &TAPE_3590_CRYPT_INFO(device);
715
716 if (sense->masst == MSENSE_UNASSOCIATED) {
717 tape_med_state_set(device, MS_UNLOADED);
718 TAPE_3590_CRYPT_INFO(device).medium_status = 0;
719 return;
720 }
721 if (sense->masst != MSENSE_ASSOCIATED_MOUNT) {
722 PRINT_ERR("Unknown medium state: %x\n", sense->masst);
723 return;
724 }
725 tape_med_state_set(device, MS_LOADED);
726 c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK;
727 if (sense->flags & MSENSE_CRYPT_MASK) {
728 PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags);
729 c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK;
730 } else {
731 DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags);
732 c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK;
733 }
734}
735
368/* 736/*
369 * The done handler is called at device/channel end and wakes up the sleeping 737 * The done handler is called at device/channel end and wakes up the sleeping
370 * process 738 * process
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request)
372static int 740static int
373tape_3590_done(struct tape_device *device, struct tape_request *request) 741tape_3590_done(struct tape_device *device, struct tape_request *request)
374{ 742{
375 struct tape_3590_med_sense *sense; 743 struct tape_3590_disc_data *disc_data;
376 744
377 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); 745 DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]);
746 disc_data = device->discdata;
378 747
379 switch (request->op) { 748 switch (request->op) {
380 case TO_BSB: 749 case TO_BSB:
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
394 break; 763 break;
395 case TO_RUN: 764 case TO_RUN:
396 tape_med_state_set(device, MS_UNLOADED); 765 tape_med_state_set(device, MS_UNLOADED);
766 tape_3590_schedule_work(device, TO_CRYPT_OFF);
397 break; 767 break;
398 case TO_MSEN: 768 case TO_MSEN:
399 sense = (struct tape_3590_med_sense *) request->cpdata; 769 tape_3590_med_state_set(device, request->cpdata);
400 if (sense->masst == MSENSE_UNASSOCIATED) 770 break;
401 tape_med_state_set(device, MS_UNLOADED); 771 case TO_CRYPT_ON:
402 if (sense->masst == MSENSE_ASSOCIATED_MOUNT) 772 TAPE_3590_CRYPT_INFO(device).status
403 tape_med_state_set(device, MS_LOADED); 773 |= TAPE390_CRYPT_ON_MASK;
774 *(device->modeset_byte) |= 0x03;
775 break;
776 case TO_CRYPT_OFF:
777 TAPE_3590_CRYPT_INFO(device).status
778 &= ~TAPE390_CRYPT_ON_MASK;
779 *(device->modeset_byte) &= ~0x03;
404 break; 780 break;
405 case TO_RBI: /* RBI seems to succeed even without medium loaded. */ 781 case TO_RBI: /* RBI seems to succeed even without medium loaded. */
406 case TO_NOP: /* Same to NOP. */ 782 case TO_NOP: /* Same to NOP. */
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
409 case TO_DIS: 785 case TO_DIS:
410 case TO_ASSIGN: 786 case TO_ASSIGN:
411 case TO_UNASSIGN: 787 case TO_UNASSIGN:
412 break;
413 case TO_SIZE: 788 case TO_SIZE:
789 case TO_KEKL_SET:
790 case TO_KEKL_QUERY:
414 break; 791 break;
415 } 792 }
416 return TAPE_IO_SUCCESS; 793 return TAPE_IO_SUCCESS;
@@ -540,10 +917,8 @@ static int
540tape_3590_erp_long_busy(struct tape_device *device, 917tape_3590_erp_long_busy(struct tape_device *device,
541 struct tape_request *request, struct irb *irb) 918 struct tape_request *request, struct irb *irb)
542{ 919{
543 /* FIXME: how about WAITING for a minute ? */ 920 DBF_EVENT(6, "Device is busy\n");
544 PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", 921 return TAPE_IO_LONG_BUSY;
545 device->cdev->dev.bus_id);
546 return tape_3590_erp_basic(device, request, irb, -EBUSY);
547} 922}
548 923
549/* 924/*
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb)
951 device->cdev->dev.bus_id, sense->mc); 1326 device->cdev->dev.bus_id, sense->mc);
952} 1327}
953 1328
1329static int tape_3590_crypt_error(struct tape_device *device,
1330 struct tape_request *request, struct irb *irb)
1331{
1332 u8 cu_rc, ekm_rc1;
1333 u16 ekm_rc2;
1334 u32 drv_rc;
1335 char *bus_id, *sense;
1336
1337 sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data;
1338 bus_id = device->cdev->dev.bus_id;
1339 cu_rc = sense[0];
1340 drv_rc = *((u32*) &sense[5]) & 0xffffff;
1341 ekm_rc1 = sense[9];
1342 ekm_rc2 = *((u16*) &sense[10]);
1343 if ((cu_rc == 0) && (ekm_rc2 == 0xee31))
1344 /* key not defined on EKM */
1345 return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED);
1346 if ((cu_rc == 1) || (cu_rc == 2))
1347 /* No connection to EKM */
1348 return tape_3590_erp_basic(device, request, irb, -ENOTCONN);
1349
1350 PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id);
1351 PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc,
1352 drv_rc, ekm_rc1, ekm_rc2);
1353
1354 return tape_3590_erp_basic(device, request, irb, -ENOKEY);
1355}
1356
954/* 1357/*
955 * 3590 error Recovery routine: 1358 * 3590 error Recovery routine:
956 * If possible, it tries to recover from the error. If this is not possible, 1359 * If possible, it tries to recover from the error. If this is not possible,
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
979 1382
980 sense = (struct tape_3590_sense *) irb->ecw; 1383 sense = (struct tape_3590_sense *) irb->ecw;
981 1384
1385 DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc);
1386
982 /* 1387 /*
983 * First check all RC-QRCs where we want to do something special 1388 * First check all RC-QRCs where we want to do something special
984 * - "break": basic error recovery is done 1389 * - "break": basic error recovery is done
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
999 case 0x2231: 1404 case 0x2231:
1000 tape_3590_print_era_msg(device, irb); 1405 tape_3590_print_era_msg(device, irb);
1001 return tape_3590_erp_special_interrupt(device, request, irb); 1406 return tape_3590_erp_special_interrupt(device, request, irb);
1407 case 0x2240:
1408 return tape_3590_crypt_error(device, request, irb);
1002 1409
1003 case 0x3010: 1410 case 0x3010:
1004 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", 1411 DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n",
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1020 DBF_EVENT(2, "(%08x): Rewind Unload complete\n", 1427 DBF_EVENT(2, "(%08x): Rewind Unload complete\n",
1021 device->cdev_id); 1428 device->cdev_id);
1022 tape_med_state_set(device, MS_UNLOADED); 1429 tape_med_state_set(device, MS_UNLOADED);
1430 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1023 return tape_3590_erp_basic(device, request, irb, 0); 1431 return tape_3590_erp_basic(device, request, irb, 0);
1024 1432
1025 case 0x4010: 1433 case 0x4010:
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1030 PRINT_WARN("(%s): Tape operation when medium not loaded\n", 1438 PRINT_WARN("(%s): Tape operation when medium not loaded\n",
1031 device->cdev->dev.bus_id); 1439 device->cdev->dev.bus_id);
1032 tape_med_state_set(device, MS_UNLOADED); 1440 tape_med_state_set(device, MS_UNLOADED);
1441 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1033 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1442 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1034 case 0x4012: /* Device Long Busy */ 1443 case 0x4012: /* Device Long Busy */
1444 /* XXX: Also use long busy handling here? */
1445 DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id);
1035 tape_3590_print_era_msg(device, irb); 1446 tape_3590_print_era_msg(device, irb);
1447 return tape_3590_erp_basic(device, request, irb, -EBUSY);
1448 case 0x4014:
1449 DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id);
1036 return tape_3590_erp_long_busy(device, request, irb); 1450 return tape_3590_erp_long_busy(device, request, irb);
1037 1451
1038 case 0x5010: 1452 case 0x5010:
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request,
1064 case 0x5120: 1478 case 0x5120:
1065 case 0x1120: 1479 case 0x1120:
1066 tape_med_state_set(device, MS_UNLOADED); 1480 tape_med_state_set(device, MS_UNLOADED);
1481 tape_3590_schedule_work(device, TO_CRYPT_OFF);
1067 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); 1482 return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM);
1068 1483
1069 case 0x6020: 1484 case 0x6020:
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device)
1142{ 1557{
1143 int rc; 1558 int rc;
1144 struct tape_3590_disc_data *data; 1559 struct tape_3590_disc_data *data;
1560 char *rdc_data;
1145 1561
1146 DBF_EVENT(6, "3590 device setup\n"); 1562 DBF_EVENT(6, "3590 device setup\n");
1147 data = kmalloc(sizeof(struct tape_3590_disc_data), 1563 data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
1148 GFP_KERNEL | GFP_DMA);
1149 if (data == NULL) 1564 if (data == NULL)
1150 return -ENOMEM; 1565 return -ENOMEM;
1151 data->read_back_op = READ_PREVIOUS; 1566 data->read_back_op = READ_PREVIOUS;
1152 device->discdata = data; 1567 device->discdata = data;
1153 1568
1154 if ((rc = tape_std_assign(device)) == 0) { 1569 rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
1155 /* Try to find out if medium is loaded */ 1570 if (!rdc_data) {
1156 if ((rc = tape_3590_sense_medium(device)) != 0) 1571 rc = -ENOMEM;
1157 DBF_LH(3, "3590 medium sense returned %d\n", rc); 1572 goto fail_kmalloc;
1573 }
1574 rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64);
1575 if (rc) {
1576 DBF_LH(3, "Read device characteristics failed!\n");
1577 goto fail_kmalloc;
1578 }
1579 rc = tape_std_assign(device);
1580 if (rc)
1581 goto fail_rdc_data;
1582 if (rdc_data[31] == 0x13) {
1583 PRINT_INFO("Device has crypto support\n");
1584 data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
1585 tape_3592_disable_crypt(device);
1586 } else {
1587 DBF_EVENT(6, "Device has NO crypto support\n");
1158 } 1588 }
1589 /* Try to find out if medium is loaded */
1590 rc = tape_3590_sense_medium(device);
1591 if (rc) {
1592 DBF_LH(3, "3590 medium sense returned %d\n", rc);
1593 goto fail_rdc_data;
1594 }
1595 return 0;
1159 1596
1597fail_rdc_data:
1598 kfree(rdc_data);
1599fail_kmalloc:
1600 kfree(data);
1160 return rc; 1601 return rc;
1161} 1602}
1162 1603
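
The reworked tape_3590_setup_device() now reads 64 bytes of device characteristics and treats byte 31 == 0x13 as a 3592 control unit with encryption support, then calls tape_3592_disable_crypt() so the drive starts in a known state. A one-line helper expressing that check, as a sketch (the offset and value are taken from the hunk above, the helper name is illustrative):

/* Sketch: interpret the read_dev_chars() buffer the way setup_device does. */
static int rdc_shows_crypto(const unsigned char *rdc_data, unsigned int len)
{
	return len > 31 && rdc_data[31] == 0x13;
}
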
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index cf274b9445a6..aa5138807af1 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -2,7 +2,7 @@
2 * drivers/s390/char/tape_3590.h 2 * drivers/s390/char/tape_3590.h
3 * tape device discipline for 3590 tapes. 3 * tape device discipline for 3590 tapes.
4 * 4 *
5 * Copyright (C) IBM Corp. 2001,2006 5 * Copyright IBM Corp. 2001,2006
6 * Author(s): Stefan Bader <shbader@de.ibm.com> 6 * Author(s): Stefan Bader <shbader@de.ibm.com>
7 * Michael Holzheu <holzheu@de.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com>
8 * Martin Schwidefsky <schwidefsky@de.ibm.com> 8 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -38,16 +38,22 @@
38#define MSENSE_UNASSOCIATED 0x00 38#define MSENSE_UNASSOCIATED 0x00
39#define MSENSE_ASSOCIATED_MOUNT 0x01 39#define MSENSE_ASSOCIATED_MOUNT 0x01
40#define MSENSE_ASSOCIATED_UMOUNT 0x02 40#define MSENSE_ASSOCIATED_UMOUNT 0x02
41#define MSENSE_CRYPT_MASK 0x00000010
41 42
42#define TAPE_3590_MAX_MSG 0xb0 43#define TAPE_3590_MAX_MSG 0xb0
43 44
44/* Datatypes */ 45/* Datatypes */
45 46
46struct tape_3590_disc_data { 47struct tape_3590_disc_data {
47 unsigned char modeset_byte; 48 struct tape390_crypt_info crypt_info;
48 int read_back_op; 49 int read_back_op;
49}; 50};
50 51
52#define TAPE_3590_CRYPT_INFO(device) \
53 ((struct tape_3590_disc_data*)(device->discdata))->crypt_info
54#define TAPE_3590_READ_BACK_OP(device) \
55 ((struct tape_3590_disc_data*)(device->discdata))->read_back_op
56
51struct tape_3590_sense { 57struct tape_3590_sense {
52 58
53 unsigned int command_rej:1; 59 unsigned int command_rej:1;
@@ -118,7 +124,48 @@ struct tape_3590_sense {
118struct tape_3590_med_sense { 124struct tape_3590_med_sense {
119 unsigned int macst:4; 125 unsigned int macst:4;
120 unsigned int masst:4; 126 unsigned int masst:4;
121 char pad[127]; 127 char pad1[7];
128 unsigned int flags;
129 char pad2[116];
130} __attribute__ ((packed));
131
132/* Datastructures for 3592 encryption support */
133
134struct tape3592_kekl {
135 __u8 flags;
136 char label[64];
137} __attribute__ ((packed));
138
139struct tape3592_kekl_pair {
140 __u8 count;
141 struct tape3592_kekl kekl[2];
142} __attribute__ ((packed));
143
144struct tape3592_kekl_query_data {
145 __u16 len;
146 __u8 fmt;
147 __u8 mc;
148 __u32 id;
149 __u8 flags;
150 struct tape3592_kekl_pair kekls;
151 char reserved[116];
152} __attribute__ ((packed));
153
154struct tape3592_kekl_query_order {
155 __u8 code;
156 __u8 flags;
157 char reserved1[2];
158 __u8 max_count;
159 char reserved2[35];
160} __attribute__ ((packed));
161
162struct tape3592_kekl_set_order {
163 __u8 code;
164 __u8 flags;
165 char reserved1[2];
166 __u8 op;
167 struct tape3592_kekl_pair kekls;
168 char reserved2[120];
122} __attribute__ ((packed)); 169} __attribute__ ((packed));
123 170
124#endif /* _TAPE_3590_H */ 171#endif /* _TAPE_3590_H */
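
The tape3592_* layouts above are the control-unit form; ext_to_int_kekl() converts the user-visible struct tape390_kekl into them by blank-padding the label, translating it to EBCDIC and encoding hash-type KEKLs in the 0x40/0x80 flag bits. Building on the assumptions of the earlier user-space sketch (<asm/tape390.h>, an already opened tape file descriptor), setting two label-type KEKLs could look roughly like this; only fields the driver actually touches in this diff are used:

#include <string.h>
#include <sys/ioctl.h>
#include <asm/tape390.h>	/* assumed home of the tape390_* definitions */

/* Sketch only: fill a KEKL pair and hand it to TAPE390_KEKL_SET. */
static int set_label_kekls(int fd, const char *label1, const char *label2)
{
	struct tape390_kekl_pair pair;

	memset(&pair, 0, sizeof(pair));
	pair.kekl[0].type = TAPE390_KEKL_TYPE_LABEL;
	pair.kekl[0].type_on_tape = TAPE390_KEKL_TYPE_LABEL;
	strncpy(pair.kekl[0].label, label1, sizeof(pair.kekl[0].label) - 1);
	pair.kekl[1].type = TAPE390_KEKL_TYPE_LABEL;
	pair.kekl[1].type_on_tape = TAPE390_KEKL_TYPE_LABEL;
	strncpy(pair.kekl[1].label, label2, sizeof(pair.kekl[1].label) - 1);
	return ioctl(fd, TAPE390_KEKL_SET, &pair);
}
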
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index c8a89b3b87d4..dd0ecaed592e 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device)
73/* 73/*
74 * Post finished request. 74 * Post finished request.
75 */ 75 */
76static inline void 76static void
77tapeblock_end_request(struct request *req, int uptodate) 77tapeblock_end_request(struct request *req, int uptodate)
78{ 78{
79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 79 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
108/* 108/*
109 * Feed the tape device CCW queue with requests supplied in a list. 109 * Feed the tape device CCW queue with requests supplied in a list.
110 */ 110 */
111static inline int 111static int
112tapeblock_start_request(struct tape_device *device, struct request *req) 112tapeblock_start_request(struct tape_device *device, struct request *req)
113{ 113{
114 struct tape_request * ccw_req; 114 struct tape_request * ccw_req;
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 31198c8f2718..9faea04e11e9 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -3,7 +3,7 @@
3 * character device frontend for tape device driver 3 * character device frontend for tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device)
89 device->nt = NULL; 89 device->nt = NULL;
90} 90}
91 91
92/* 92static int
93 * Terminate write command (we write two TMs and skip backward over last)
94 * This ensures that the tape is always correctly terminated.
95 * When the user writes afterwards a new file, he will overwrite the
96 * second TM and therefore one TM will remain to separate the
97 * two files on the tape...
98 */
99static inline void
100tapechar_terminate_write(struct tape_device *device)
101{
102 if (tape_mtop(device, MTWEOF, 1) == 0 &&
103 tape_mtop(device, MTWEOF, 1) == 0)
104 tape_mtop(device, MTBSR, 1);
105}
106
107static inline int
108tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) 93tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
109{ 94{
110 struct idal_buffer *new; 95 struct idal_buffer *new;
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
137/* 122/*
138 * Tape device read function 123 * Tape device read function
139 */ 124 */
140ssize_t 125static ssize_t
141tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) 126tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
142{ 127{
143 struct tape_device *device; 128 struct tape_device *device;
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
201/* 186/*
202 * Tape device write function 187 * Tape device write function
203 */ 188 */
204ssize_t 189static ssize_t
205tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) 190tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
206{ 191{
207 struct tape_device *device; 192 struct tape_device *device;
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t
291/* 276/*
292 * Character frontend tape device open function. 277 * Character frontend tape device open function.
293 */ 278 */
294int 279static int
295tapechar_open (struct inode *inode, struct file *filp) 280tapechar_open (struct inode *inode, struct file *filp)
296{ 281{
297 struct tape_device *device; 282 struct tape_device *device;
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp)
326 * Character frontend tape device release function. 311 * Character frontend tape device release function.
327 */ 312 */
328 313
329int 314static int
330tapechar_release(struct inode *inode, struct file *filp) 315tapechar_release(struct inode *inode, struct file *filp)
331{ 316{
332 struct tape_device *device; 317 struct tape_device *device;
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index c6c2e918b990..e2a8a1a04bab 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -3,7 +3,7 @@
3 * basic function of the tape device driver 3 * basic function of the tape device driver
4 * 4 *
5 * S390 and zSeries version 5 * S390 and zSeries version
6 * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 2001,2006
7 * Author(s): Carsten Otte <cotte@de.ibm.com> 7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Michael Holzheu <holzheu@de.ibm.com> 8 * Michael Holzheu <holzheu@de.ibm.com>
9 * Tuan Ngo-Anh <ngoanh@de.ibm.com> 9 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
@@ -26,9 +26,11 @@
26#include "tape_std.h" 26#include "tape_std.h"
27 27
28#define PRINTK_HEADER "TAPE_CORE: " 28#define PRINTK_HEADER "TAPE_CORE: "
29#define LONG_BUSY_TIMEOUT 180 /* seconds */
29 30
30static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); 31static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
31static void tape_delayed_next_request(struct work_struct *); 32static void tape_delayed_next_request(struct work_struct *);
33static void tape_long_busy_timeout(unsigned long data);
32 34
33/* 35/*
34 * One list to contain all tape devices of all disciplines, so 36 * One list to contain all tape devices of all disciplines, so
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] =
69 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", 71 [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF",
70 [TO_READ_ATTMSG] = "RAT", 72 [TO_READ_ATTMSG] = "RAT",
71 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", 73 [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS",
72 [TO_UNASSIGN] = "UAS" 74 [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON",
75 [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
76 [TO_KEKL_QUERY] = "KLQ",
73}; 77};
74 78
75static inline int 79static int
76busid_to_int(char *bus_id) 80busid_to_int(char *bus_id)
77{ 81{
78 int dec; 82 int dec;
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
252/* 256/*
253 * Stop running ccw. Has to be called with the device lock held. 257 * Stop running ccw. Has to be called with the device lock held.
254 */ 258 */
255static inline int 259static int
256__tape_cancel_io(struct tape_device *device, struct tape_request *request) 260__tape_cancel_io(struct tape_device *device, struct tape_request *request)
257{ 261{
258 int retries; 262 int retries;
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device,
346 return -EINVAL; 350 return -EINVAL;
347 } 351 }
348 352
353 init_timer(&device->lb_timeout);
354 device->lb_timeout.function = tape_long_busy_timeout;
355
349 /* Let the discipline have a go at the device. */ 356 /* Let the discipline have a go at the device. */
350 device->discipline = discipline; 357 device->discipline = discipline;
351 if (!try_module_get(discipline->owner)) { 358 if (!try_module_get(discipline->owner)) {
@@ -385,7 +392,7 @@ out:
385 return rc; 392 return rc;
386} 393}
387 394
388static inline void 395static void
389tape_cleanup_device(struct tape_device *device) 396tape_cleanup_device(struct tape_device *device)
390{ 397{
391 tapeblock_cleanup_device(device); 398 tapeblock_cleanup_device(device);
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev)
563 return ret; 570 return ret;
564} 571}
565 572
566static inline void 573static void
567__tape_discard_requests(struct tape_device *device) 574__tape_discard_requests(struct tape_device *device)
568{ 575{
569 struct tape_request * request; 576 struct tape_request * request;
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request)
703 kfree(request); 710 kfree(request);
704} 711}
705 712
706static inline int 713static int
707__tape_start_io(struct tape_device *device, struct tape_request *request) 714__tape_start_io(struct tape_device *device, struct tape_request *request)
708{ 715{
709 int rc; 716 int rc;
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
733 return rc; 740 return rc;
734} 741}
735 742
736static inline void 743static void
737__tape_start_next_request(struct tape_device *device) 744__tape_start_next_request(struct tape_device *device)
738{ 745{
739 struct list_head *l, *n; 746 struct list_head *l, *n;
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work)
801 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 808 spin_unlock_irq(get_ccwdev_lock(device->cdev));
802} 809}
803 810
804static inline void 811static void tape_long_busy_timeout(unsigned long data)
812{
813 struct tape_request *request;
814 struct tape_device *device;
815
816 device = (struct tape_device *) data;
817 spin_lock_irq(get_ccwdev_lock(device->cdev));
818 request = list_entry(device->req_queue.next, struct tape_request, list);
819 if (request->status != TAPE_REQUEST_LONG_BUSY)
820 BUG();
821 DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
822 __tape_start_next_request(device);
823 device->lb_timeout.data = (unsigned long) tape_put_device(device);
824 spin_unlock_irq(get_ccwdev_lock(device->cdev));
825}
826
827static void
805__tape_end_request( 828__tape_end_request(
806 struct tape_device * device, 829 struct tape_device * device,
807 struct tape_request * request, 830 struct tape_request * request,
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
878 * and starts it if the tape is idle. Has to be called with 901 * and starts it if the tape is idle. Has to be called with
879 * the device lock held. 902 * the device lock held.
880 */ 903 */
881static inline int 904static int
882__tape_start_request(struct tape_device *device, struct tape_request *request) 905__tape_start_request(struct tape_device *device, struct tape_request *request)
883{ 906{
884 int rc; 907 int rc;
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1094 /* May be an unsolicited irq */ 1117 /* May be an unsolicited irq */
1095 if(request != NULL) 1118 if(request != NULL)
1096 request->rescnt = irb->scsw.count; 1119 request->rescnt = irb->scsw.count;
1097 1120 else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
1121 !list_empty(&device->req_queue)) {
1122 /* Not Ready to Ready after long busy ? */
1123 struct tape_request *req;
1124 req = list_entry(device->req_queue.next,
1125 struct tape_request, list);
1126 if (req->status == TAPE_REQUEST_LONG_BUSY) {
1127 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
1128 if (del_timer(&device->lb_timeout)) {
1129 device->lb_timeout.data = (unsigned long)
1130 tape_put_device(device);
1131 __tape_start_next_request(device);
1132 }
1133 return;
1134 }
1135 }
1098 if (irb->scsw.dstat != 0x0c) { 1136 if (irb->scsw.dstat != 0x0c) {
1099 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1137 /* Set the 'ONLINE' flag depending on sense byte 1 */
1100 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1138 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
1142 break; 1180 break;
1143 case TAPE_IO_PENDING: 1181 case TAPE_IO_PENDING:
1144 break; 1182 break;
1183 case TAPE_IO_LONG_BUSY:
1184 device->lb_timeout.data =
1185 (unsigned long)tape_get_device_reference(device);
1186 device->lb_timeout.expires = jiffies +
1187 LONG_BUSY_TIMEOUT * HZ;
1188 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
1189 add_timer(&device->lb_timeout);
1190 request->status = TAPE_REQUEST_LONG_BUSY;
1191 break;
1145 case TAPE_IO_RETRY: 1192 case TAPE_IO_RETRY:
1146 rc = __tape_start_io(device, request); 1193 rc = __tape_start_io(device, request);
1147 if (rc) 1194 if (rc)
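
The tape_core.c changes park a request when the discipline returns TAPE_IO_LONG_BUSY: the request is marked TAPE_REQUEST_LONG_BUSY, a per-device timer is armed for LONG_BUSY_TIMEOUT (180 seconds), and either a not-ready-to-ready interrupt (device status 0x80/0x85) or the timer expiry restarts the queue. A minimal, standalone sketch of that arm/cancel pattern with the timer API of this kernel generation; the names and the struct are illustrative, not the driver's own helpers:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define LB_TIMEOUT_SECS 180	/* mirrors LONG_BUSY_TIMEOUT in tape_core.c */

/* Hypothetical per-device state, for this sketch only. */
struct busy_dev {
	struct timer_list lb_timeout;
	int parked;
};

static void long_busy_expired(unsigned long data)
{
	struct busy_dev *dev = (struct busy_dev *) data;

	dev->parked = 0;		/* timer fired: restart the queue */
}

static void busy_dev_init(struct busy_dev *dev)
{
	init_timer(&dev->lb_timeout);
	dev->lb_timeout.function = long_busy_expired;
	dev->parked = 0;
}

/* Discipline reported long busy: park the request and arm the timer. */
static void park_request(struct busy_dev *dev)
{
	dev->parked = 1;
	dev->lb_timeout.data = (unsigned long) dev;
	dev->lb_timeout.expires = jiffies + LB_TIMEOUT_SECS * HZ;
	add_timer(&dev->lb_timeout);
}

/* Not-ready-to-ready interrupt arrived first: cancel and restart now. */
static void device_ready_again(struct busy_dev *dev)
{
	if (dev->parked && del_timer(&dev->lb_timeout))
		dev->parked = 0;
}
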
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 09844621edc0..bc33068b9ce2 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -36,7 +36,7 @@
36struct tty_driver *tty3270_driver; 36struct tty_driver *tty3270_driver;
37static int tty3270_max_index; 37static int tty3270_max_index;
38 38
39struct raw3270_fn tty3270_fn; 39static struct raw3270_fn tty3270_fn;
40 40
41struct tty3270_cell { 41struct tty3270_cell {
42 unsigned char character; 42 unsigned char character;
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *);
119/* 119/*
120 * Setup timeout for a device. On timeout trigger an update. 120 * Setup timeout for a device. On timeout trigger an update.
121 */ 121 */
122void 122static void tty3270_set_timer(struct tty3270 *tp, int expires)
123tty3270_set_timer(struct tty3270 *tp, int expires)
124{ 123{
125 if (expires == 0) { 124 if (expires == 0) {
126 if (timer_pending(&tp->timer) && del_timer(&tp->timer)) 125 if (timer_pending(&tp->timer) && del_timer(&tp->timer))
@@ -841,7 +840,7 @@ tty3270_del_views(void)
841 } 840 }
842} 841}
843 842
844struct raw3270_fn tty3270_fn = { 843static struct raw3270_fn tty3270_fn = {
845 .activate = tty3270_activate, 844 .activate = tty3270_activate,
846 .deactivate = tty3270_deactivate, 845 .deactivate = tty3270_deactivate,
847 .intv = (void *) tty3270_irq, 846 .intv = (void *) tty3270_irq,
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = {
1754 .set_termios = tty3270_set_termios 1753 .set_termios = tty3270_set_termios
1755}; 1754};
1756 1755
1757void 1756static void tty3270_notifier(int index, int active)
1758tty3270_notifier(int index, int active)
1759{ 1757{
1760 if (active) 1758 if (active)
1761 tty_register_device(tty3270_driver, index, NULL); 1759 tty_register_device(tty3270_driver, index, NULL);
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active)
1767 * 3270 tty registration code called from tty_init(). 1765 * 3270 tty registration code called from tty_init().
 1768 * Most kernel services (incl. kmalloc) are available at this point. 1766 * Most kernel services (incl. kmalloc) are available at this point.
1769 */ 1767 */
1770int __init 1768static int __init tty3270_init(void)
1771tty3270_init(void)
1772{ 1769{
1773 struct tty_driver *driver; 1770 struct tty_driver *driver;
1774 int ret; 1771 int ret;
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6cb23040954b..4f894dc2373b 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = {
128 .MessagePending = vmlogrdr_iucv_MessagePending, 128 .MessagePending = vmlogrdr_iucv_MessagePending,
129}; 129};
130 130
131 131static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
132DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); 132static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
133DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
134 133
135/* 134/*
136 * pointer to system service private structure 135 * pointer to system service private structure
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 12c2d6b746e6..aa65df4dfced 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action;
43 * Function: blacklist_range 43 * Function: blacklist_range
44 * (Un-)blacklist the devices from-to 44 * (Un-)blacklist the devices from-to
45 */ 45 */
46static inline void 46static void
47blacklist_range (range_action action, unsigned int from, unsigned int to, 47blacklist_range (range_action action, unsigned int from, unsigned int to,
48 unsigned int ssid) 48 unsigned int ssid)
49{ 49{
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to,
69 * Get devno/busid from given string. 69 * Get devno/busid from given string.
70 * Shamelessly grabbed from dasd_devmap.c. 70 * Shamelessly grabbed from dasd_devmap.c.
71 */ 71 */
72static inline int 72static int
73blacklist_busid(char **str, int *id0, int *ssid, int *devno) 73blacklist_busid(char **str, int *id0, int *ssid, int *devno)
74{ 74{
75 int val, old_style; 75 int val, old_style;
@@ -123,10 +123,10 @@ confused:
123 return 1; 123 return 1;
124} 124}
125 125
126static inline int 126static int
127blacklist_parse_parameters (char *str, range_action action) 127blacklist_parse_parameters (char *str, range_action action)
128{ 128{
129 unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; 129 int from, to, from_id0, to_id0, from_ssid, to_ssid;
130 130
131 while (*str != 0 && *str != '\n') { 131 while (*str != 0 && *str != '\n') {
132 range_action ra = action; 132 range_action ra = action;
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno)
227 * Function: blacklist_parse_proc_parameters 227 * Function: blacklist_parse_proc_parameters
228 * parse the stuff which is piped to /proc/cio_ignore 228 * parse the stuff which is piped to /proc/cio_ignore
229 */ 229 */
230static inline void 230static void
231blacklist_parse_proc_parameters (char *buf) 231blacklist_parse_proc_parameters (char *buf)
232{ 232{
233 if (strncmp (buf, "free ", 5) == 0) { 233 if (strncmp (buf, "free ", 5) == 0) {
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 38954f5cd14c..d48e3ca4752c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
53 53
54static struct bus_type ccwgroup_bus_type; 54static struct bus_type ccwgroup_bus_type;
55 55
56static inline void 56static void
57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) 57__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
58{ 58{
59 int i; 59 int i;
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev)
104 kfree(gdev); 104 kfree(gdev);
105} 105}
106 106
107static inline int 107static int
108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev) 108__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
109{ 109{
110 char str[8]; 110 char str[8];
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev)
424 return 0; 424 return 0;
425} 425}
426 426
427static inline struct ccwgroup_device * 427static struct ccwgroup_device *
428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) 428__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
429{ 429{
430 struct ccwgroup_device *gdev; 430 struct ccwgroup_device *gdev;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index cbab8d2ce5cf..6f05a44e3817 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
93 u16 sch; /* subchannel */ 93 u16 sch; /* subchannel */
94 u8 chpid[8]; /* chpids 0-7 */ 94 u8 chpid[8]; /* chpids 0-7 */
95 u16 fla[8]; /* full link addresses 0-7 */ 95 u16 fla[8]; /* full link addresses 0-7 */
96 } *ssd_area; 96 } __attribute__ ((packed)) *ssd_area;
97 97
98 ssd_area = page; 98 ssd_area = page;
99 99
@@ -277,7 +277,7 @@ out_unreg:
277 return 0; 277 return 0;
278} 278}
279 279
280static inline void 280static void
281s390_set_chpid_offline( __u8 chpid) 281s390_set_chpid_offline( __u8 chpid)
282{ 282{
283 char dbf_txt[15]; 283 char dbf_txt[15];
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
338 return 0x80 >> chp; 338 return 0x80 >> chp;
339} 339}
340 340
341static inline int 341static int
342s390_process_res_acc_new_sch(struct subchannel_id schid) 342s390_process_res_acc_new_sch(struct subchannel_id schid)
343{ 343{
344 struct schib schib; 344 struct schib schib;
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
444 u32 andesc[28]; 444 u32 andesc[28];
445 /* incident-specific information */ 445 /* incident-specific information */
446 u32 isinfo[28]; 446 u32 isinfo[28];
447 } *lir; 447 } __attribute__ ((packed)) *lir;
448 448
449 lir = data; 449 lir = data;
450 if (!(lir->iq&0x80)) 450 if (!(lir->iq&0x80))
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data)
461 return (u16) (lir->indesc[0]&0x000000ff); 461 return (u16) (lir->indesc[0]&0x000000ff);
462} 462}
463 463
464int 464struct chsc_sei_area {
465chsc_process_crw(void) 465 struct chsc_header request;
466 u32 reserved1;
467 u32 reserved2;
468 u32 reserved3;
469 struct chsc_header response;
470 u32 reserved4;
471 u8 flags;
472 u8 vf; /* validity flags */
473 u8 rs; /* reporting source */
474 u8 cc; /* content code */
475 u16 fla; /* full link address */
476 u16 rsid; /* reporting source id */
477 u32 reserved5;
478 u32 reserved6;
479 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
480 /* ccdf has to be big enough for a link-incident record */
481} __attribute__ ((packed));
482
483static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
484{
485 int chpid;
486
487 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
488 sei_area->rs, sei_area->rsid);
489 if (sei_area->rs != 4)
490 return 0;
491 chpid = __get_chpid_from_lir(sei_area->ccdf);
492 if (chpid < 0)
493 CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
494 else
495 s390_set_chpid_offline(chpid);
496
497 return 0;
498}
499
500static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
466{ 501{
467 int chpid, ret;
468 struct res_acc_data res_data; 502 struct res_acc_data res_data;
469 struct { 503 struct device *dev;
470 struct chsc_header request; 504 int status;
471 u32 reserved1; 505 int rc;
472 u32 reserved2; 506
473 u32 reserved3; 507 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
474 struct chsc_header response; 508 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
475 u32 reserved4; 509 if (sei_area->rs != 4)
476 u8 flags; 510 return 0;
477 u8 vf; /* validity flags */ 511 /* allocate a new channel path structure, if needed */
478 u8 rs; /* reporting source */ 512 status = get_chp_status(sei_area->rsid);
479 u8 cc; /* content code */ 513 if (status < 0)
480 u16 fla; /* full link address */ 514 new_channel_path(sei_area->rsid);
481 u16 rsid; /* reporting source id */ 515 else if (!status)
482 u32 reserved5; 516 return 0;
483 u32 reserved6; 517 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
484 u32 ccdf[96]; /* content-code dependent field */ 518 memset(&res_data, 0, sizeof(struct res_acc_data));
485 /* ccdf has to be big enough for a link-incident record */ 519 res_data.chp = to_channelpath(dev);
486 } *sei_area; 520 if ((sei_area->vf & 0xc0) != 0) {
521 res_data.fla = sei_area->fla;
522 if ((sei_area->vf & 0xc0) == 0xc0)
523 /* full link address */
524 res_data.fla_mask = 0xffff;
525 else
526 /* link address */
527 res_data.fla_mask = 0xff00;
528 }
529 rc = s390_process_res_acc(&res_data);
530 put_device(dev);
531
532 return rc;
533}
534
535static int chsc_process_sei(struct chsc_sei_area *sei_area)
536{
537 int rc;
538
539 /* Check if we might have lost some information. */
540 if (sei_area->flags & 0x40)
541 CIO_CRW_EVENT(2, "chsc: event overflow\n");
542 /* which kind of information was stored? */
543 rc = 0;
544 switch (sei_area->cc) {
545 case 1: /* link incident*/
546 rc = chsc_process_sei_link_incident(sei_area);
547 break;
548 case 2: /* i/o resource accessibiliy */
549 rc = chsc_process_sei_res_acc(sei_area);
550 break;
551 default: /* other stuff */
552 CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
553 sei_area->cc);
554 break;
555 }
556
557 return rc;
558}
559
560int chsc_process_crw(void)
561{
562 struct chsc_sei_area *sei_area;
563 int ret;
564 int rc;
487 565
488 if (!sei_page) 566 if (!sei_page)
489 return 0; 567 return 0;
490 /* 568 /* Access to sei_page is serialized through machine check handler
491 * build the chsc request block for store event information 569 * thread, so no need for locking. */
492 * and do the call
493 * This function is only called by the machine check handler thread,
494 * so we don't need locking for the sei_page.
495 */
496 sei_area = sei_page; 570 sei_area = sei_page;
497 571
498 CIO_TRACE_EVENT( 2, "prcss"); 572 CIO_TRACE_EVENT( 2, "prcss");
499 ret = 0; 573 ret = 0;
500 do { 574 do {
501 int ccode, status;
502 struct device *dev;
503 memset(sei_area, 0, sizeof(*sei_area)); 575 memset(sei_area, 0, sizeof(*sei_area));
504 memset(&res_data, 0, sizeof(struct res_acc_data));
505 sei_area->request.length = 0x0010; 576 sei_area->request.length = 0x0010;
506 sei_area->request.code = 0x000e; 577 sei_area->request.code = 0x000e;
578 if (chsc(sei_area))
579 break;
507 580
508 ccode = chsc(sei_area); 581 if (sei_area->response.code == 0x0001) {
509 if (ccode > 0) 582 CIO_CRW_EVENT(4, "chsc: sei successful\n");
510 return 0; 583 rc = chsc_process_sei(sei_area);
511 584 if (rc)
512 switch (sei_area->response.code) { 585 ret = rc;
513 /* for debug purposes, check for problems */ 586 } else {
514 case 0x0001: 587 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
515 CIO_CRW_EVENT(4, "chsc_process_crw: event information "
516 "successfully stored\n");
517 break; /* everything ok */
518 case 0x0002:
519 CIO_CRW_EVENT(2,
520 "chsc_process_crw: invalid command!\n");
521 return 0;
522 case 0x0003:
523 CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
524 "request block!\n");
525 return 0;
526 case 0x0005:
527 CIO_CRW_EVENT(2, "chsc_process_crw: no event "
528 "information stored\n");
529 return 0;
530 default:
531 CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
532 sei_area->response.code); 588 sei_area->response.code);
533 return 0; 589 ret = 0;
534 }
535
536 /* Check if we might have lost some information. */
537 if (sei_area->flags & 0x40)
538 CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
539 "has been lost due to overflow!\n");
540
541 if (sei_area->rs != 4) {
542 CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
543 "(%04X) isn't a chpid!\n",
544 sei_area->rsid);
545 continue;
546 }
547
548 /* which kind of information was stored? */
549 switch (sei_area->cc) {
550 case 1: /* link incident*/
551 CIO_CRW_EVENT(4, "chsc_process_crw: "
552 "channel subsystem reports link incident,"
553 " reporting source is chpid %x\n",
554 sei_area->rsid);
555 chpid = __get_chpid_from_lir(sei_area->ccdf);
556 if (chpid < 0)
557 CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
558 __FUNCTION__);
559 else
560 s390_set_chpid_offline(chpid);
561 break;
562
563 case 2: /* i/o resource accessibiliy */
564 CIO_CRW_EVENT(4, "chsc_process_crw: "
565 "channel subsystem reports some I/O "
566 "devices may have become accessible\n");
567 pr_debug("Data received after sei: \n");
568 pr_debug("Validity flags: %x\n", sei_area->vf);
569
570 /* allocate a new channel path structure, if needed */
571 status = get_chp_status(sei_area->rsid);
572 if (status < 0)
573 new_channel_path(sei_area->rsid);
574 else if (!status)
575 break;
576 dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
577 res_data.chp = to_channelpath(dev);
578 pr_debug("chpid: %x", sei_area->rsid);
579 if ((sei_area->vf & 0xc0) != 0) {
580 res_data.fla = sei_area->fla;
581 if ((sei_area->vf & 0xc0) == 0xc0) {
582 pr_debug(" full link addr: %x",
583 sei_area->fla);
584 res_data.fla_mask = 0xffff;
585 } else {
586 pr_debug(" link addr: %x",
587 sei_area->fla);
588 res_data.fla_mask = 0xff00;
589 }
590 }
591 ret = s390_process_res_acc(&res_data);
592 pr_debug("\n\n");
593 put_device(dev);
594 break;
595
596 default: /* other stuff */
597 CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
598 sei_area->cc);
599 break; 590 break;
600 } 591 }
601 } while (sei_area->flags & 0x80); 592 } while (sei_area->flags & 0x80);
593
602 return ret; 594 return ret;
603} 595}
604 596
605static inline int 597static int
606__chp_add_new_sch(struct subchannel_id schid) 598__chp_add_new_sch(struct subchannel_id schid)
607{ 599{
608 struct schib schib; 600 struct schib schib;
609 int ret; 601 int ret;
610 602
611 if (stsch(schid, &schib)) 603 if (stsch_err(schid, &schib))
612 /* We're through */ 604 /* We're through */
613 return need_rescan ? -EAGAIN : -ENXIO; 605 return need_rescan ? -EAGAIN : -ENXIO;
614 606
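For readers following the restructured store-event-information (SEI) handling in the hunk above, the sketch below shows the same dispatch shape in plain, userspace C: fetch an event, warn on the overflow flag (0x40), hand the event to a per-content-code helper (1 = link incident, 2 = resource accessibility), and keep looping while the "more events pending" flag (0x80) is set. The chsc() instruction is replaced by an invented fetch_sei() stub; only the flag and content-code values are taken from the patch, everything else is illustration.

    #include <stdio.h>

    /* Simplified stand-in for the SEI response; only the fields the
     * dispatch logic needs.  The real layout is in the hunk above. */
    struct sei_event {
        unsigned char flags;    /* 0x80: more pending, 0x40: overflow */
        unsigned char cc;       /* 1: link incident, 2: res. accessibility */
    };

    /* Hypothetical event source standing in for the chsc() call. */
    static int fetch_sei(struct sei_event *ev)
    {
        static const struct sei_event queue[] = {
            { .flags = 0x80, .cc = 1 },
            { .flags = 0x00, .cc = 2 },
        };
        static unsigned int next;

        if (next >= sizeof(queue) / sizeof(queue[0]))
            return -1;          /* nothing stored */
        *ev = queue[next++];
        return 0;               /* analogue of response code 0x0001 */
    }

    static int handle_link_incident(const struct sei_event *ev)
    {
        printf("link incident event\n");
        return 0;
    }

    static int handle_res_acc(const struct sei_event *ev)
    {
        printf("resource accessibility event\n");
        return 0;
    }

    int main(void)
    {
        struct sei_event ev;

        do {
            if (fetch_sei(&ev))
                break;
            if (ev.flags & 0x40)
                printf("event overflow, information may be lost\n");
            switch (ev.cc) {
            case 1:
                handle_link_incident(&ev);
                break;
            case 2:
                handle_res_acc(&ev);
                break;
            default:
                printf("unhandled content code %d\n", ev.cc);
                break;
            }
        } while (ev.flags & 0x80);
        return 0;
    }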
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on)
709 return chp_add(chpid); 701 return chp_add(chpid);
710} 702}
711 703
712static inline int check_for_io_on_path(struct subchannel *sch, int index) 704static int check_for_io_on_path(struct subchannel *sch, int index)
713{ 705{
714 int cc; 706 int cc;
715 707
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch)
741 sch->driver->termination(&sch->dev); 733 sch->driver->termination(&sch->dev);
742} 734}
743 735
744static inline void 736static void
745__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) 737__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
746{ 738{
747 int chp, old_lpm; 739 int chp, old_lpm;
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = {
967static void 959static void
968chsc_remove_chp_cmg_attr(struct channel_path *chp) 960chsc_remove_chp_cmg_attr(struct channel_path *chp)
969{ 961{
970 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); 962 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
971 sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); 963 device_remove_bin_file(&chp->dev, &chp_measurement_attr);
972} 964}
973 965
974static int 966static int
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp)
976{ 968{
977 int ret; 969 int ret;
978 970
979 ret = sysfs_create_bin_file(&chp->dev.kobj, 971 ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
980 &chp_measurement_chars_attr);
981 if (ret) 972 if (ret)
982 return ret; 973 return ret;
983 ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); 974 ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
984 if (ret) 975 if (ret)
985 sysfs_remove_bin_file(&chp->dev.kobj, 976 device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
986 &chp_measurement_chars_attr);
987 return ret; 977 return ret;
988} 978}
989 979
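The chp_measurement hunk above switches from the raw sysfs calls to device_create_bin_file()/device_remove_bin_file() but keeps the usual unwind rule: if the second attribute cannot be created, the first one is removed again before the error is returned. A small userspace sketch of that rule, with make_file()/drop_file() as invented stand-ins rather than any real kernel API:

    #include <stdio.h>

    /* Stand-ins for the create/remove calls; fail_second simulates an error. */
    static int fail_second;

    static int make_file(const char *name)
    {
        if (fail_second && name[0] == 'b')
            return -1;
        printf("created %s\n", name);
        return 0;
    }

    static void drop_file(const char *name)
    {
        printf("removed %s\n", name);
    }

    /* Create two related attributes; on failure of the second, undo the first. */
    static int add_measurement_files(void)
    {
        int ret;

        ret = make_file("a_chars");
        if (ret)
            return ret;
        ret = make_file("b_data");
        if (ret)
            drop_file("a_chars");
        return ret;
    }

    int main(void)
    {
        fail_second = 1;
        if (add_measurement_files())
            printf("second file failed, first was unwound\n");
        return 0;
    }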
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
1042 u32 : 4; 1032 u32 : 4;
1043 u32 fmt : 4; 1033 u32 fmt : 4;
1044 u32 : 16; 1034 u32 : 16;
1045 } *secm_area; 1035 } __attribute__ ((packed)) *secm_area;
1046 int ret, ccode; 1036 int ret, ccode;
1047 1037
1048 secm_area = page; 1038 secm_area = page;
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid,
1253 struct chsc_header response; 1243 struct chsc_header response;
1254 u32 zeroes2; 1244 u32 zeroes2;
1255 struct channel_path_desc desc; 1245 struct channel_path_desc desc;
1256 } *scpd_area; 1246 } __attribute__ ((packed)) *scpd_area;
1257 1247
1258 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1248 scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1259 if (!scpd_area) 1249 if (!scpd_area)
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
1350 u32 cmg : 8; 1340 u32 cmg : 8;
1351 u32 zeroes3; 1341 u32 zeroes3;
1352 u32 data[NR_MEASUREMENT_CHARS]; 1342 u32 data[NR_MEASUREMENT_CHARS];
1353 } *scmc_area; 1343 } __attribute__ ((packed)) *scmc_area;
1354 1344
1355 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1345 scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1356 if (!scmc_area) 1346 if (!scmc_area)
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code)
1517 u32 reserved5:4; 1507 u32 reserved5:4;
1518 u32 format2:4; 1508 u32 format2:4;
1519 u32 reserved6:24; 1509 u32 reserved6:24;
1520 } *sda_area; 1510 } __attribute__ ((packed)) *sda_area;
1521 1511
1522 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); 1512 sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1523 if (!sda_area) 1513 if (!sda_area)
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void)
1569 u32 reserved4; 1559 u32 reserved4;
1570 u32 general_char[510]; 1560 u32 general_char[510];
1571 u32 chsc_char[518]; 1561 u32 chsc_char[518];
1572 } *scsc_area; 1562 } __attribute__ ((packed)) *scsc_area;
1573 1563
1574 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1564 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1575 if (!scsc_area) { 1565 if (!scsc_area) {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index a259245780ae..0fb2b024208f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -10,17 +10,17 @@
10struct chsc_header { 10struct chsc_header {
11 u16 length; 11 u16 length;
12 u16 code; 12 u16 code;
13}; 13} __attribute__ ((packed));
14 14
15#define NR_MEASUREMENT_CHARS 5 15#define NR_MEASUREMENT_CHARS 5
16struct cmg_chars { 16struct cmg_chars {
17 u32 values[NR_MEASUREMENT_CHARS]; 17 u32 values[NR_MEASUREMENT_CHARS];
18}; 18} __attribute__ ((packed));
19 19
20#define NR_MEASUREMENT_ENTRIES 8 20#define NR_MEASUREMENT_ENTRIES 8
21struct cmg_entry { 21struct cmg_entry {
22 u32 values[NR_MEASUREMENT_ENTRIES]; 22 u32 values[NR_MEASUREMENT_ENTRIES];
23}; 23} __attribute__ ((packed));
24 24
25struct channel_path_desc { 25struct channel_path_desc {
26 u8 flags; 26 u8 flags;
@@ -31,7 +31,7 @@ struct channel_path_desc {
31 u8 zeroes; 31 u8 zeroes;
32 u8 chla; 32 u8 chla;
33 u8 chpp; 33 u8 chpp;
34}; 34} __attribute__ ((packed));
35 35
36struct channel_path { 36struct channel_path {
37 int id; 37 int id;
@@ -47,6 +47,9 @@ struct channel_path {
47extern void s390_process_css( void ); 47extern void s390_process_css( void );
48extern void chsc_validate_chpids(struct subchannel *); 48extern void chsc_validate_chpids(struct subchannel *);
49extern void chpid_is_actually_online(int); 49extern void chpid_is_actually_online(int);
50extern int css_get_ssd_info(struct subchannel *);
51extern int chsc_process_crw(void);
52extern int chp_process_crw(int, int);
50 53
51struct css_general_char { 54struct css_general_char {
52 u64 : 41; 55 u64 : 41;
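Several hunks above add __attribute__ ((packed)) to structures whose layout is dictated by the channel-subsystem interface, so the compiler must not insert padding between members. A minimal sketch of the effect, using an invented two-field layout rather than any of the real control blocks:

    #include <stdio.h>
    #include <stdint.h>

    /* Invented layout: a 16-bit length followed by a 32-bit code. */
    struct unpacked_hdr {
        uint16_t length;
        uint32_t code;          /* compiler may insert 2 bytes of padding */
    };

    struct packed_hdr {
        uint16_t length;
        uint32_t code;
    } __attribute__ ((packed)); /* members stay back to back */

    int main(void)
    {
        printf("unpacked: %zu bytes\n", sizeof(struct unpacked_hdr)); /* typically 8 */
        printf("packed:   %zu bytes\n", sizeof(struct packed_hdr));   /* 6 */
        return 0;
    }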
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index ae1bf231d089..b3a56dc5f68a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch)
122 * Use tpi to get a pending interrupt, call the interrupt handler and 122 * Use tpi to get a pending interrupt, call the interrupt handler and
123 * return a pointer to the subchannel structure. 123 * return a pointer to the subchannel structure.
124 */ 124 */
125static inline int 125static int
126cio_tpi(void) 126cio_tpi(void)
127{ 127{
128 struct tpi_info *tpi_info; 128 struct tpi_info *tpi_info;
@@ -152,7 +152,7 @@ cio_tpi(void)
152 return 1; 152 return 1;
153} 153}
154 154
155static inline int 155static int
156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) 156cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
157{ 157{
158 char dbf_text[15]; 158 char dbf_text[15];
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
585 * This device must not be known to Linux. So we simply 585 * This device must not be known to Linux. So we simply
586 * say that there is no device and return ENODEV. 586 * say that there is no device and return ENODEV.
587 */ 587 */
588 CIO_MSG_EVENT(0, "Blacklisted device detected " 588 CIO_MSG_EVENT(4, "Blacklisted device detected "
589 "at devno %04X, subchannel set %x\n", 589 "at devno %04X, subchannel set %x\n",
590 sch->schib.pmcw.dev, sch->schid.ssid); 590 sch->schib.pmcw.dev, sch->schid.ssid);
591 err = -ENODEV; 591 err = -ENODEV;
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs)
646 * Make sure that the i/o interrupt did not "overtake" 646 * Make sure that the i/o interrupt did not "overtake"
647 * the last HZ timer interrupt. 647 * the last HZ timer interrupt.
648 */ 648 */
649 account_ticks(); 649 account_ticks(S390_lowcore.int_clock);
650 /* 650 /*
651 * Get interrupt information from lowcore 651 * Get interrupt information from lowcore
652 */ 652 */
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void)
832} 832}
833 833
834#endif 834#endif
835static inline int 835static int
836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) 836__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
837{ 837{
838 int retry, cc; 838 int retry, cc;
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
850 return -EBUSY; /* uhm... */ 850 return -EBUSY; /* uhm... */
851} 851}
852 852
853static inline int 853/* we can't use the normal udelay here, since it enables external interrupts */
854
855static void udelay_reset(unsigned long usecs)
856{
857 uint64_t start_cc, end_cc;
858
859 asm volatile ("STCK %0" : "=m" (start_cc));
860 do {
861 cpu_relax();
862 asm volatile ("STCK %0" : "=m" (end_cc));
863 } while (((end_cc - start_cc)/4096) < usecs);
864}
865
866static int
854__clear_subchannel_easy(struct subchannel_id schid) 867__clear_subchannel_easy(struct subchannel_id schid)
855{ 868{
856 int retry; 869 int retry;
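udelay_reset() in the hunk above busy-waits on TOD clock readings taken with STCK, dividing the difference by 4096 to convert it to microseconds, because (per the comment) the normal udelay() would re-enable external interrupts in this reset path. The following is only a rough, portable analogue of that busy-wait structure, with POSIX clock_gettime() substituted for STCK; it is not kernel code:

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    /* Busy-wait for roughly 'usecs' microseconds by polling a clock source. */
    static void busy_udelay(unsigned long usecs)
    {
        struct timespec start, now;
        int64_t elapsed_us;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
            clock_gettime(CLOCK_MONOTONIC, &now);
            elapsed_us = (int64_t)(now.tv_sec - start.tv_sec) * 1000000 +
                         (now.tv_nsec - start.tv_nsec) / 1000;
        } while (elapsed_us < (int64_t)usecs);
    }

    int main(void)
    {
        printf("waiting 100 microseconds...\n");
        busy_udelay(100);
        printf("done\n");
        return 0;
    }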
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid)
865 if (schid_equal(&ti.schid, &schid)) 878 if (schid_equal(&ti.schid, &schid))
866 return 0; 879 return 0;
867 } 880 }
868 udelay(100); 881 udelay_reset(100);
869 } 882 }
870 return -EBUSY; 883 return -EBUSY;
871} 884}
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
882 int rc; 895 int rc;
883 896
884 pgm_check_occured = 0; 897 pgm_check_occured = 0;
885 s390_reset_pgm_handler = cio_reset_pgm_check_handler; 898 s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
886 rc = stsch(schid, addr); 899 rc = stsch(schid, addr);
887 s390_reset_pgm_handler = NULL; 900 s390_base_pgm_handler_fn = NULL;
888 901
889 /* The program check handler could have changed pgm_check_occured */ 902 /* The program check handler could have changed pgm_check_occured. */
890 barrier(); 903 barrier();
891 904
892 if (pgm_check_occured) 905 if (pgm_check_occured)
@@ -944,7 +957,7 @@ static void css_reset(void)
944 /* Reset subchannels. */ 957 /* Reset subchannels. */
945 for_each_subchannel(__shutdown_subchannel_easy, NULL); 958 for_each_subchannel(__shutdown_subchannel_easy, NULL);
946 /* Reset channel paths. */ 959 /* Reset channel paths. */
947 s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; 960 s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
948 /* Enable channel report machine checks. */ 961 /* Enable channel report machine checks. */
949 __ctl_set_bit(14, 28); 962 __ctl_set_bit(14, 28);
950 /* Temporarily reenable machine checks. */ 963 /* Temporarily reenable machine checks. */
@@ -969,7 +982,7 @@ static void css_reset(void)
969 local_mcck_disable(); 982 local_mcck_disable();
970 /* Disable channel report machine checks. */ 983 /* Disable channel report machine checks. */
971 __ctl_clear_bit(14, 28); 984 __ctl_clear_bit(14, 28);
972 s390_reset_mcck_handler = NULL; 985 s390_base_mcck_handler_fn = NULL;
973} 986}
974 987
975static struct reset_call css_reset_call = { 988static struct reset_call css_reset_call = {
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 828b2d334f0a..90b22faabbf7 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -519,8 +519,8 @@ struct cmb {
519/* insert a single device into the cmb_area list 519/* insert a single device into the cmb_area list
520 * called with cmb_area.lock held from alloc_cmb 520 * called with cmb_area.lock held from alloc_cmb
521 */ 521 */
522static inline int alloc_cmb_single (struct ccw_device *cdev, 522static int alloc_cmb_single(struct ccw_device *cdev,
523 struct cmb_data *cmb_data) 523 struct cmb_data *cmb_data)
524{ 524{
525 struct cmb *cmb; 525 struct cmb *cmb;
526 struct ccw_device_private *node; 526 struct ccw_device_private *node;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 9d6c02446863..fe0ace7aece8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1];
30 30
31int css_characteristics_avail = 0; 31int css_characteristics_avail = 0;
32 32
33inline int 33int
34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 34for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
35{ 35{
36 struct subchannel_id schid; 36 struct subchannel_id schid;
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev)
108 } 108 }
109} 109}
110 110
111extern int css_get_ssd_info(struct subchannel *sch);
112
113
114int css_sch_device_register(struct subchannel *sch) 111int css_sch_device_register(struct subchannel *sch)
115{ 112{
116 int ret; 113 int ret;
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
187 return dev ? to_subchannel(dev) : NULL; 184 return dev ? to_subchannel(dev) : NULL;
188} 185}
189 186
190static inline int css_get_subchannel_status(struct subchannel *sch) 187static int css_get_subchannel_status(struct subchannel *sch)
191{ 188{
192 struct schib schib; 189 struct schib schib;
193 190
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
299 /* Will be done on the slow path. */ 296 /* Will be done on the slow path. */
300 return -EAGAIN; 297 return -EAGAIN;
301 } 298 }
302 if (stsch(schid, &schib) || !schib.pmcw.dnv) { 299 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) {
303 /* Unusable - ignore. */ 300 /* Unusable - ignore. */
304 return 0; 301 return 0;
305 } 302 }
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused)
417 need_reprobe); 414 need_reprobe);
418} 415}
419 416
420DECLARE_WORK(css_reprobe_work, reprobe_all); 417static DECLARE_WORK(css_reprobe_work, reprobe_all);
421 418
422/* Schedule reprobing of all unregistered subchannels. */ 419/* Schedule reprobing of all unregistered subchannels. */
423void css_schedule_reprobe(void) 420void css_schedule_reprobe(void)
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
578 575
579static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); 576static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
580 577
581static inline int __init setup_css(int nr) 578static int __init setup_css(int nr)
582{ 579{
583 u32 tod_high; 580 u32 tod_high;
584 int ret; 581 int ret;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 3464c5b875c4..ca2bab932a8a 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 144extern int css_init_done;
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern int css_process_crw(int, int);
147extern void css_reiterate_subchannels(void);
146 148
147#define __MAX_SUBCHANNEL 65535 149#define __MAX_SUBCHANNEL 65535
148#define __MAX_SSID 3 150#define __MAX_SSID 3
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 803579053c2f..e322111fb369 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type;
138 138
139static int io_subchannel_probe (struct subchannel *); 139static int io_subchannel_probe (struct subchannel *);
140static int io_subchannel_remove (struct subchannel *); 140static int io_subchannel_remove (struct subchannel *);
141void io_subchannel_irq (struct device *);
142static int io_subchannel_notify(struct device *, int); 141static int io_subchannel_notify(struct device *, int);
143static void io_subchannel_verify(struct device *); 142static void io_subchannel_verify(struct device *);
144static void io_subchannel_ioterm(struct device *); 143static void io_subchannel_ioterm(struct device *);
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
235 ssize_t ret = 0; 234 ssize_t ret = 0;
236 int chp; 235 int chp;
237 236
238 if (ssd) 237 for (chp = 0; chp < 8; chp++)
239 for (chp = 0; chp < 8; chp++) 238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
240 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
241 else
242 ret += sprintf (buf, "n/a");
243 ret += sprintf (buf+ret, "\n"); 239 ret += sprintf (buf+ret, "\n");
244 return min((ssize_t)PAGE_SIZE, ret); 240 return min((ssize_t)PAGE_SIZE, ret);
245} 241}
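chpids_show() above now formats the eight CHPIDs unconditionally, two hex digits each, and still caps the returned length at PAGE_SIZE. A tiny userspace sketch of the same format-into-a-bounded-buffer pattern, with invented values (BUF_SIZE stands in for PAGE_SIZE):

    #include <stdio.h>

    #define BUF_SIZE 4096   /* stands in for PAGE_SIZE */

    /* Format eight channel-path IDs, space separated, newline terminated;
     * the output here (25 bytes) is far below BUF_SIZE. */
    static size_t format_chpids(char *buf, const unsigned char *chpid)
    {
        size_t ret = 0;
        int chp;

        for (chp = 0; chp < 8; chp++)
            ret += sprintf(buf + ret, "%02x ", chpid[chp]);
        ret += sprintf(buf + ret, "\n");
        return ret < BUF_SIZE ? ret : BUF_SIZE;
    }

    int main(void)
    {
        unsigned char chpid[8] = { 0x10, 0x11, 0x40, 0x41, 0, 0, 0, 0 };
        char buf[BUF_SIZE];

        format_chpids(buf, chpid);
        fputs(buf, stdout);
        return 0;
    }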
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = {
552 .attrs = ccwdev_attrs, 548 .attrs = ccwdev_attrs,
553}; 549};
554 550
555static inline int 551static int
556device_add_files (struct device *dev) 552device_add_files (struct device *dev)
557{ 553{
558 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); 554 return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
559} 555}
560 556
561static inline void 557static void
562device_remove_files(struct device *dev) 558device_remove_files(struct device *dev)
563{ 559{
564 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); 560 sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 29db6341d632..b66338b76579 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 74extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 75extern atomic_t ccw_device_init_count;
76 76
77void io_subchannel_irq (struct device *pdev);
77void io_subchannel_recog_done(struct ccw_device *cdev); 78void io_subchannel_recog_done(struct ccw_device *cdev);
78 79
79int ccw_device_cancel_halt_clear(struct ccw_device *); 80int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *);
118/* qdio needs this. */ 119/* qdio needs this. */
119void ccw_device_set_timeout(struct ccw_device *, int); 120void ccw_device_set_timeout(struct ccw_device *, int);
120extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 121extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
122extern struct bus_type ccw_bus_type;
121 123
122/* Channel measurement facility related */ 124/* Channel measurement facility related */
123void retry_set_schib(struct ccw_device *cdev); 125void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index eed14572fc3b..51238e7555bb 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev)
206 * been varied online on the SE so we have to find out by magic (i. e. driving 206 * been varied online on the SE so we have to find out by magic (i. e. driving
207 * the channel subsystem to device selection and updating our path masks). 207 * the channel subsystem to device selection and updating our path masks).
208 */ 208 */
209static inline void 209static void
210__recover_lost_chpids(struct subchannel *sch, int old_lpm) 210__recover_lost_chpids(struct subchannel *sch, int old_lpm)
211{ 211{
212 int mask, i; 212 int mask, i;
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
387 put_device (&cdev->dev); 387 put_device (&cdev->dev);
388} 388}
389 389
390static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) 390static int cmp_pgid(struct pgid *p1, struct pgid *p2)
391{ 391{
392 char *c1; 392 char *c1;
393 char *c2; 393 char *c2;
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
842call_handler_unsol: 842call_handler_unsol:
843 if (cdev->handler) 843 if (cdev->handler)
844 cdev->handler (cdev, 0, irb); 844 cdev->handler (cdev, 0, irb);
845 if (cdev->private->flags.doverify)
846 ccw_device_online_verify(cdev, 0);
845 return; 847 return;
846 } 848 }
847 /* Accumulate status and find out if a basic sense is needed. */ 849 /* Accumulate status and find out if a basic sense is needed. */
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
892/* 894/*
893 * Got an interrupt for a basic sense. 895 * Got an interrupt for a basic sense.
894 */ 896 */
895void 897static void
896ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) 898ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
897{ 899{
898 struct irb *irb; 900 struct irb *irb;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d269607336ec..d7b25b8f71d2 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
302 wake_up(&cdev->private->wait_q); 302 wake_up(&cdev->private->wait_q);
303} 303}
304 304
305static inline int 305static int
306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) 306__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
307{ 307{
308 int ret; 308 int ret;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index bdcf930f7beb..6b1caea622ea 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -25,7 +25,7 @@
25 * Check for any kind of channel or interface control check but don't 25 * Check for any kind of channel or interface control check but don't
26 * issue the message for the console device 26 * issue the message for the console device
27 */ 27 */
28static inline void 28static void
29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 29ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
30{ 30{
31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 31 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
72/* 72/*
73 * Copy valid bits from the extended control word to device irb. 73 * Copy valid bits from the extended control word to device irb.
74 */ 74 */
75static inline void 75static void
76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) 76ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
77{ 77{
78 /* 78 /*
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
94/* 94/*
95 * Check if extended status word is valid. 95 * Check if extended status word is valid.
96 */ 96 */
97static inline int 97static int
98ccw_device_accumulate_esw_valid(struct irb *irb) 98ccw_device_accumulate_esw_valid(struct irb *irb)
99{ 99{
100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 100 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND)
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb)
109/* 109/*
110 * Copy valid bits from the extended status word to device irb. 110 * Copy valid bits from the extended status word to device irb.
111 */ 111 */
112static inline void 112static void
113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) 113ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
114{ 114{
115 struct irb *cdev_irb; 115 struct irb *cdev_irb;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 6fd1940842eb..d726cd5777de 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL");
66/******************** HERE WE GO ***********************************/ 66/******************** HERE WE GO ***********************************/
67 67
68static const char version[] = "QDIO base support version 2"; 68static const char version[] = "QDIO base support version 2";
69extern struct bus_type ccw_bus_type;
70 69
71static int qdio_performance_stats = 0; 70static int qdio_performance_stats = 0;
72static int proc_perf_file_registration; 71static int proc_perf_file_registration;
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q)
138} 137}
139 138
140/*check ccq */ 139/*check ccq */
141static inline int 140static int
142qdio_check_ccq(struct qdio_q *q, unsigned int ccq) 141qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
143{ 142{
144 char dbf_text[15]; 143 char dbf_text[15];
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
153 return -EIO; 152 return -EIO;
154} 153}
155/* EQBS: extract buffer states */ 154/* EQBS: extract buffer states */
156static inline int 155static int
157qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 156qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
158 unsigned int *start, unsigned int *cnt) 157 unsigned int *start, unsigned int *cnt)
159{ 158{
@@ -188,7 +187,7 @@ again:
188} 187}
189 188
190/* SQBS: set buffer states */ 189/* SQBS: set buffer states */
191static inline int 190static int
192qdio_do_sqbs(struct qdio_q *q, unsigned char state, 191qdio_do_sqbs(struct qdio_q *q, unsigned char state,
193 unsigned int *start, unsigned int *cnt) 192 unsigned int *start, unsigned int *cnt)
194{ 193{
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns 314 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
316 * an access exception 315 * an access exception
317 */ 316 */
318static inline int 317static int
319qdio_siga_output(struct qdio_q *q) 318qdio_siga_output(struct qdio_q *q)
320{ 319{
321 int cc; 320 int cc;
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q)
349 return cc; 348 return cc;
350} 349}
351 350
352static inline int 351static int
353qdio_siga_input(struct qdio_q *q) 352qdio_siga_input(struct qdio_q *q)
354{ 353{
355 int cc; 354 int cc;
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void)
421 tasklet_hi_schedule(&tiqdio_tasklet); 420 tasklet_hi_schedule(&tiqdio_tasklet);
422} 421}
423 422
424static inline void 423static void
425qdio_mark_tiq(struct qdio_q *q) 424qdio_mark_tiq(struct qdio_q *q)
426{ 425{
427 unsigned long flags; 426 unsigned long flags;
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q)
471 tasklet_schedule(&q->tasklet); 470 tasklet_schedule(&q->tasklet);
472} 471}
473 472
474static inline int 473static int
475qdio_stop_polling(struct qdio_q *q) 474qdio_stop_polling(struct qdio_q *q)
476{ 475{
477#ifdef QDIO_USE_PROCESSING_STATE 476#ifdef QDIO_USE_PROCESSING_STATE
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q)
525 * sophisticated locking outside of unmark_q, so that we don't need to 524 * sophisticated locking outside of unmark_q, so that we don't need to
526 * disable the interrupts :-) 525 * disable the interrupts :-)
527*/ 526*/
528static inline void 527static void
529qdio_unmark_q(struct qdio_q *q) 528qdio_unmark_q(struct qdio_q *q)
530{ 529{
531 unsigned long flags; 530 unsigned long flags;
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
691 return q->first_to_check; 690 return q->first_to_check;
692} 691}
693 692
694static inline int 693static int
695qdio_get_outbound_buffer_frontier(struct qdio_q *q) 694qdio_get_outbound_buffer_frontier(struct qdio_q *q)
696{ 695{
697 struct qdio_irq *irq; 696 struct qdio_irq *irq;
@@ -774,7 +773,7 @@ out:
774} 773}
775 774
776/* all buffers are processed */ 775/* all buffers are processed */
777static inline int 776static int
778qdio_is_outbound_q_done(struct qdio_q *q) 777qdio_is_outbound_q_done(struct qdio_q *q)
779{ 778{
780 int no_used; 779 int no_used;
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q)
796 return (no_used==0); 795 return (no_used==0);
797} 796}
798 797
799static inline int 798static int
800qdio_has_outbound_q_moved(struct qdio_q *q) 799qdio_has_outbound_q_moved(struct qdio_q *q)
801{ 800{
802 int i; 801 int i;
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q)
816 } 815 }
817} 816}
818 817
819static inline void 818static void
820qdio_kick_outbound_q(struct qdio_q *q) 819qdio_kick_outbound_q(struct qdio_q *q)
821{ 820{
822 int result; 821 int result;
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q)
905 } 904 }
906} 905}
907 906
908static inline void 907static void
909qdio_kick_outbound_handler(struct qdio_q *q) 908qdio_kick_outbound_handler(struct qdio_q *q)
910{ 909{
911 int start, end, real_end, count; 910 int start, end, real_end, count;
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q)
942 q->error_status_flags=0; 941 q->error_status_flags=0;
943} 942}
944 943
945static inline void 944static void
946__qdio_outbound_processing(struct qdio_q *q) 945__qdio_outbound_processing(struct qdio_q *q)
947{ 946{
948 int siga_attempts; 947 int siga_attempts;
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q)
1002/************************* INBOUND ROUTINES *******************************/ 1001/************************* INBOUND ROUTINES *******************************/
1003 1002
1004 1003
1005static inline int 1004static int
1006qdio_get_inbound_buffer_frontier(struct qdio_q *q) 1005qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1007{ 1006{
1008 struct qdio_irq *irq; 1007 struct qdio_irq *irq;
@@ -1133,7 +1132,7 @@ out:
1133 return q->first_to_check; 1132 return q->first_to_check;
1134} 1133}
1135 1134
1136static inline int 1135static int
1137qdio_has_inbound_q_moved(struct qdio_q *q) 1136qdio_has_inbound_q_moved(struct qdio_q *q)
1138{ 1137{
1139 int i; 1138 int i;
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
1167} 1166}
1168 1167
1169/* means, no more buffers to be filled */ 1168/* means, no more buffers to be filled */
1170static inline int 1169static int
1171tiqdio_is_inbound_q_done(struct qdio_q *q) 1170tiqdio_is_inbound_q_done(struct qdio_q *q)
1172{ 1171{
1173 int no_used; 1172 int no_used;
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
1228 return 0; 1227 return 0;
1229} 1228}
1230 1229
1231static inline int 1230static int
1232qdio_is_inbound_q_done(struct qdio_q *q) 1231qdio_is_inbound_q_done(struct qdio_q *q)
1233{ 1232{
1234 int no_used; 1233 int no_used;
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q)
1296 } 1295 }
1297} 1296}
1298 1297
1299static inline void 1298static void
1300qdio_kick_inbound_handler(struct qdio_q *q) 1299qdio_kick_inbound_handler(struct qdio_q *q)
1301{ 1300{
1302 int count, start, end, real_end, i; 1301 int count, start, end, real_end, i;
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
1343 } 1342 }
1344} 1343}
1345 1344
1346static inline void 1345static void
1347__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) 1346__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1348{ 1347{
1349 struct qdio_irq *irq_ptr; 1348 struct qdio_irq *irq_ptr;
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q)
1442 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); 1441 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1443} 1442}
1444 1443
1445static inline void 1444static void
1446__qdio_inbound_processing(struct qdio_q *q) 1445__qdio_inbound_processing(struct qdio_q *q)
1447{ 1446{
1448 int q_laps=0; 1447 int q_laps=0;
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q)
1493/************************* MAIN ROUTINES *******************************/ 1492/************************* MAIN ROUTINES *******************************/
1494 1493
1495#ifdef QDIO_USE_PROCESSING_STATE 1494#ifdef QDIO_USE_PROCESSING_STATE
1496static inline int 1495static int
1497tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) 1496tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1498{ 1497{
1499 if (!q) { 1498 if (!q) {
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1545} 1544}
1546#endif /* QDIO_USE_PROCESSING_STATE */ 1545#endif /* QDIO_USE_PROCESSING_STATE */
1547 1546
1548static inline void 1547static void
1549tiqdio_inbound_checks(void) 1548tiqdio_inbound_checks(void)
1550{ 1549{
1551 struct qdio_q *q; 1550 struct qdio_q *q;
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1949 mb(); 1948 mb();
1950} 1949}
1951 1950
1952static inline void 1951static void
1953qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) 1952qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1954{ 1953{
1955 char dbf_text[15]; 1954 char dbf_text[15];
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1966 1965
1967} 1966}
1968 1967
1969static inline void 1968static void
1970qdio_handle_pci(struct qdio_irq *irq_ptr) 1969qdio_handle_pci(struct qdio_irq *irq_ptr)
1971{ 1970{
1972 int i; 1971 int i;
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
2002 2001
2003static void qdio_establish_handle_irq(struct ccw_device*, int, int); 2002static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2004 2003
2005static inline void 2004static void
2006qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, 2005qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
2007 int cstat, int dstat) 2006 int cstat, int dstat)
2008{ 2007{
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2229 return cc; 2228 return cc;
2230} 2229}
2231 2230
2232static inline void 2231static void
2233qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, 2232qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2234 unsigned long token) 2233 unsigned long token)
2235{ 2234{
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev)
2740 return 0; 2739 return 0;
2741} 2740}
2742 2741
2743static inline void 2742static void
2744qdio_allocate_do_dbf(struct qdio_initialize *init_data) 2743qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2745{ 2744{
2746 char dbf_text[20]; /* if a printf printed out more than 8 chars */ 2745 char dbf_text[20]; /* if a printf printed out more than 8 chars */
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2773 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); 2772 QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2774} 2773}
2775 2774
2776static inline void 2775static void
2777qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) 2776qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2778{ 2777{
2779 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; 2778 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2792 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; 2791 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2793} 2792}
2794 2793
2795static inline void 2794static void
2796qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, 2795qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2797 int j, int iqfmt) 2796 int j, int iqfmt)
2798{ 2797{
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2813} 2812}
2814 2813
2815 2814
2816static inline void 2815static void
2817qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) 2816qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2818{ 2817{
2819 int i; 2818 int i;
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2839 } 2838 }
2840} 2839}
2841 2840
2842static inline void 2841static void
2843qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) 2842qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2844{ 2843{
2845 int i; 2844 int i;
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2865 } 2864 }
2866} 2865}
2867 2866
2868static inline int 2867static int
2869qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, 2868qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2870 int dstat) 2869 int dstat)
2871{ 2870{
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data)
3014 return 0; 3013 return 0;
3015} 3014}
3016 3015
3017int qdio_fill_irq(struct qdio_initialize *init_data) 3016static int qdio_fill_irq(struct qdio_initialize *init_data)
3018{ 3017{
3019 int i; 3018 int i;
3020 char dbf_text[15]; 3019 char dbf_text[15];
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
3367} 3366}
3368 3367
3369/* buffers filled forwards again to make Rick happy */ 3368/* buffers filled forwards again to make Rick happy */
3370static inline void 3369static void
3371qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, 3370qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3372 unsigned int count, struct qdio_buffer *buffers) 3371 unsigned int count, struct qdio_buffer *buffers)
3373{ 3372{
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3386 } 3385 }
3387} 3386}
3388 3387
3389static inline void 3388static void
3390qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, 3389qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3391 unsigned int count, struct qdio_buffer *buffers) 3390 unsigned int count, struct qdio_buffer *buffers)
3392{ 3391{
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3407 } 3406 }
3408} 3407}
3409 3408
3410static inline void 3409static void
3411do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, 3410do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3412 unsigned int qidx, unsigned int count, 3411 unsigned int qidx, unsigned int count,
3413 struct qdio_buffer *buffers) 3412 struct qdio_buffer *buffers)
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3443 qdio_mark_q(q); 3442 qdio_mark_q(q);
3444} 3443}
3445 3444
3446static inline void 3445static void
3447do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, 3446do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3448 unsigned int qidx, unsigned int count, 3447 unsigned int qidx, unsigned int count,
3449 struct qdio_buffer *buffers) 3448 struct qdio_buffer *buffers)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 81b5899f4010..c7d1355237b6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev)
465 * Flush all requests from the request/pending queue of an AP device. 465 * Flush all requests from the request/pending queue of an AP device.
466 * @ap_dev: pointer to the AP device. 466 * @ap_dev: pointer to the AP device.
467 */ 467 */
468static inline void __ap_flush_queue(struct ap_device *ap_dev) 468static void __ap_flush_queue(struct ap_device *ap_dev)
469{ 469{
470 struct ap_message *ap_msg, *next; 470 struct ap_message *ap_msg, *next;
471 471
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = {
587/** 587/**
588 * Pick one of the 16 ap domains. 588 * Pick one of the 16 ap domains.
589 */ 589 */
590static inline int ap_select_domain(void) 590static int ap_select_domain(void)
591{ 591{
592 int queue_depth, device_type, count, max_count, best_domain; 592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j; 593 int rc, i, j;
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void)
825 * required, bit 2^1 is set if the poll timer needs to get armed 825 * required, bit 2^1 is set if the poll timer needs to get armed
826 * Returns 0 if the device is still present, -ENODEV if not. 826 * Returns 0 if the device is still present, -ENODEV if not.
827 */ 827 */
828static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 828static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
829{ 829{
830 struct ap_queue_status status; 830 struct ap_queue_status status;
831 struct ap_message *ap_msg; 831 struct ap_message *ap_msg;
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
872 * required, bit 2^1 is set if the poll timer needs to get armed 872 * required, bit 2^1 is set if the poll timer needs to get armed
873 * Returns 0 if the device is still present, -ENODEV if not. 873 * Returns 0 if the device is still present, -ENODEV if not.
874 */ 874 */
875static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 875static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
876{ 876{
877 struct ap_queue_status status; 877 struct ap_queue_status status;
878 struct ap_message *ap_msg; 878 struct ap_message *ap_msg;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 1edc10a7a6f2..b9e59bc9435a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd,
791 return rc; 791 return rc;
792} 792}
793 793
794long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 794static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
795 unsigned long arg) 795 unsigned long arg)
796{ 796{
797 if (cmd == ICARSAMODEXPO) 797 if (cmd == ICARSAMODEXPO)
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = {
833 */ 833 */
834static struct proc_dir_entry *zcrypt_entry; 834static struct proc_dir_entry *zcrypt_entry;
835 835
836static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, 836static int sprintcl(unsigned char *outaddr, unsigned char *addr,
837 unsigned int len) 837 unsigned int len)
838{ 838{
839 int hl, i; 839 int hl, i;
840 840
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr,
845 return hl; 845 return hl;
846} 846}
847 847
848static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, 848static int sprintrw(unsigned char *outaddr, unsigned char *addr,
849 unsigned int len) 849 unsigned int len)
850{ 850{
851 int hl, inl, c, cx; 851 int hl, inl, c, cx;
852 852
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr,
865 return hl; 865 return hl;
866} 866}
867 867
868static inline int sprinthx(unsigned char *title, unsigned char *outaddr, 868static int sprinthx(unsigned char *title, unsigned char *outaddr,
869 unsigned char *addr, unsigned int len) 869 unsigned char *addr, unsigned int len)
870{ 870{
871 int hl, inl, r, rx; 871 int hl, inl, r, rx;
872 872
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr,
885 return hl; 885 return hl;
886} 886}
887 887
888static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, 888static int sprinthx4(unsigned char *title, unsigned char *outaddr,
889 unsigned int *array, unsigned int len) 889 unsigned int *array, unsigned int len)
890{ 890{
891 int hl, r; 891 int hl, r;
892 892
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset,
943 zcrypt_qdepth_mask(workarea); 943 zcrypt_qdepth_mask(workarea);
944 len += sprinthx("Waiting work element counts", 944 len += sprinthx("Waiting work element counts",
945 resp_buff+len, workarea, AP_DEVICES); 945 resp_buff+len, workarea, AP_DEVICES);
946 zcrypt_perdev_reqcnt((unsigned int *) workarea); 946 zcrypt_perdev_reqcnt((int *) workarea);
947 len += sprinthx4("Per-device successfully completed request counts", 947 len += sprinthx4("Per-device successfully completed request counts",
948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 948 resp_buff+len,(unsigned int *) workarea, AP_DEVICES);
949 *eof = 1; 949 *eof = 1;
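The sprintcl()/sprintrw()/sprinthx() helpers made static above build the zcrypt /proc status output as a hex dump, row by row. As a rough, self-contained illustration of that kind of formatter (the 16-bytes-per-row layout below is an assumption for the sketch, not the driver's exact format):

    #include <stdio.h>

    /* Dump 'len' bytes as hex, 16 bytes per row with an offset column. */
    static void hexdump(const unsigned char *addr, unsigned int len)
    {
        unsigned int i;

        for (i = 0; i < len; i++) {
            if (i % 16 == 0)
                printf("%s%04x: ", i ? "\n" : "", i);
            printf("%02x ", addr[i]);
        }
        printf("\n");
    }

    int main(void)
    {
        unsigned char sample[40];
        unsigned int i;

        for (i = 0; i < sizeof(sample); i++)
            sample[i] = (unsigned char)i;
        hexdump(sample, sizeof(sample));
        return 0;
    }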
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index 32e37014345c..818ffe05ac00 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev,
191 * 191 *
192 * Returns 0 on success or -EFAULT. 192 * Returns 0 on success or -EFAULT.
193 */ 193 */
194static inline int convert_type84(struct zcrypt_device *zdev, 194static int convert_type84(struct zcrypt_device *zdev,
195 struct ap_message *reply, 195 struct ap_message *reply,
196 char __user *outputdata, 196 char __user *outputdata,
197 unsigned int outputdatalength) 197 unsigned int outputdatalength)
198{ 198{
199 struct type84_hdr *t84h = reply->message; 199 struct type84_hdr *t84h = reply->message;
200 char *data; 200 char *data;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index b7153c1e15cd..252443b6bd1b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -709,7 +709,8 @@ out_free:
709 * PCIXCC/CEX2C device to the request distributor 709 * PCIXCC/CEX2C device to the request distributor
710 * @xcRB: pointer to the send_cprb request buffer 710 * @xcRB: pointer to the send_cprb request buffer
711 */ 711 */
712long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) 712static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev,
713 struct ica_xcRB *xcRB)
713{ 714{
714 struct ap_message ap_msg; 715 struct ap_message ap_msg;
715 struct response_type resp_type = { 716 struct response_type resp_type = {
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 95f4e105cb96..7809a79feec7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL");
121#define DEBUG 121#define DEBUG
122#endif 122#endif
123 123
124 char debug_buffer[255]; 124static char debug_buffer[255];
125/** 125/**
126 * Debug Facility Stuff 126 * Debug Facility Stuff
127 */ 127 */
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch );
223/* Functions */ 223/* Functions */
224static int add_claw_reads(struct net_device *dev, 224static int add_claw_reads(struct net_device *dev,
225 struct ccwbk* p_first, struct ccwbk* p_last); 225 struct ccwbk* p_first, struct ccwbk* p_last);
226static void inline ccw_check_return_code (struct ccw_device *cdev, 226static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
227 int return_code); 227static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
228static void inline ccw_check_unit_check (struct chbk * p_ch,
229 unsigned char sense );
230static int find_link(struct net_device *dev, char *host_name, char *ws_name ); 228static int find_link(struct net_device *dev, char *host_name, char *ws_name );
231static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); 229static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
232static int init_ccw_bk(struct net_device *dev); 230static int init_ccw_bk(struct net_device *dev);
233static void probe_error( struct ccwgroup_device *cgdev); 231static void probe_error( struct ccwgroup_device *cgdev);
234static struct net_device_stats *claw_stats(struct net_device *dev); 232static struct net_device_stats *claw_stats(struct net_device *dev);
235static int inline pages_to_order_of_mag(int num_of_pages); 233static int pages_to_order_of_mag(int num_of_pages);
236static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); 234static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
237#ifdef DEBUG 235#ifdef DEBUG
238static void dumpit (char *buf, int len); 236static void dumpit (char *buf, int len);
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch )
1310* of magnitude get_free_pages() has an upper order of 9 * 1308* of magnitude get_free_pages() has an upper order of 9 *
1311*--------------------------------------------------------------------*/ 1309*--------------------------------------------------------------------*/
1312 1310
1313static int inline 1311static int
1314pages_to_order_of_mag(int num_of_pages) 1312pages_to_order_of_mag(int num_of_pages)
1315{ 1313{
1316 int order_of_mag=1; /* assume 2 pages */ 1314 int order_of_mag=1; /* assume 2 pages */
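pages_to_order_of_mag() above converts a page count into the allocation order expected by get_free_pages(), with the upper order of 9 noted in the comment block. The sketch below shows the generic calculation (smallest k with 1 << k pages covering the request, clamped at 9); it is not a copy of the driver's helper, which starts from order 1 on the assumption of at least two pages:

    #include <stdio.h>

    /* Smallest order k such that (1 << k) pages >= num_of_pages, capped at 9. */
    static int pages_to_order(int num_of_pages)
    {
        int order = 0;

        while ((1 << order) < num_of_pages && order < 9)
            order++;
        return order;
    }

    int main(void)
    {
        int pages;

        for (pages = 1; pages <= 600; pages *= 3)
            printf("%3d pages -> order %d\n", pages, pages_to_order(pages));
        return 0;
    }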
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1482 * * 1480 * *
1483 *-------------------------------------------------------------------*/ 1481 *-------------------------------------------------------------------*/
1484 1482
1485static void inline 1483static void
1486ccw_check_return_code(struct ccw_device *cdev, int return_code) 1484ccw_check_return_code(struct ccw_device *cdev, int return_code)
1487{ 1485{
1488#ifdef FUNCTRACE 1486#ifdef FUNCTRACE
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code)
1529* ccw_check_unit_check * 1527* ccw_check_unit_check *
1530*--------------------------------------------------------------------*/ 1528*--------------------------------------------------------------------*/
1531 1529
1532static void inline 1530static void
1533ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) 1531ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1534{ 1532{
1535 struct net_device *dev = p_ch->ndev; 1533 struct net_device *dev = p_ch->ndev;
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index 03cc263fe0da..5a84fbbc6611 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset)
369 * @param ch The channel where this skb has been received. 369 * @param ch The channel where this skb has been received.
370 * @param pskb The received skb. 370 * @param pskb The received skb.
371 */ 371 */
372static __inline__ void 372static void
373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 373ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
374{ 374{
375 struct net_device *dev = ch->netdev; 375 struct net_device *dev = ch->netdev;
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
512 * @param ch The channel, the error belongs to. 512 * @param ch The channel, the error belongs to.
513 * @param return_code The error code to inspect. 513 * @param return_code The error code to inspect.
514 */ 514 */
515static void inline 515static void
516ccw_check_return_code(struct channel *ch, int return_code, char *msg) 516ccw_check_return_code(struct channel *ch, int return_code, char *msg)
517{ 517{
518 DBF_TEXT(trace, 5, __FUNCTION__); 518 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
547 * @param ch The channel, the sense code belongs to. 547 * @param ch The channel, the sense code belongs to.
548 * @param sense The sense code to inspect. 548 * @param sense The sense code to inspect.
549 */ 549 */
550static void inline 550static void
551ccw_unit_check(struct channel *ch, unsigned char sense) 551ccw_unit_check(struct channel *ch, unsigned char sense)
552{ 552{
553 DBF_TEXT(trace, 5, __FUNCTION__); 553 DBF_TEXT(trace, 5, __FUNCTION__);
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
603 } 603 }
604} 604}
605 605
606static __inline__ int 606static int
607ctc_checkalloc_buffer(struct channel *ch, int warn) 607ctc_checkalloc_buffer(struct channel *ch, int warn)
608{ 608{
609 DBF_TEXT(trace, 5, __FUNCTION__); 609 DBF_TEXT(trace, 5, __FUNCTION__);
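The ctcmain.c hunks repeat the pattern seen in claw.c: file-local helpers drop the explicit inline qualifier and keep only static linkage, leaving the inlining decision to the compiler. The shape after the change, shown on a hypothetical helper rather than one of the driver's own functions:

struct channel; /* opaque here; the real definition lives in the driver */

/* was: static inline int ...; the inline hint goes away, static stays, so the
 * symbol keeps internal linkage and the compiler still inlines it where profitable */
static int ch_is_set(struct channel *ch)
{
        return ch != 0;
}
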
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
index e965f03a7291..76728ae4b843 100644
--- a/drivers/s390/net/cu3088.c
+++ b/drivers/s390/net/cu3088.c
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = {
57 57
58static struct ccw_driver cu3088_driver; 58static struct ccw_driver cu3088_driver;
59 59
60struct device *cu3088_root_dev; 60static struct device *cu3088_root_dev;
61 61
62static ssize_t 62static ssize_t
63group_write(struct device_driver *drv, const char *buf, size_t count) 63group_write(struct device_driver *drv, const char *buf, size_t count)
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index e5665b6743a1..b97dd15bdb9a 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
828/** 828/**
829 * Emit buffer of a lan command. 829 * Emit buffer of a lan command.
830 */ 830 */
831void 831static void
832lcs_lancmd_timeout(unsigned long data) 832lcs_lancmd_timeout(unsigned long data)
833{ 833{
834 struct lcs_reply *reply, *list_reply, *r; 834 struct lcs_reply *reply, *list_reply, *r;
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
1360 return 0; 1360 return 0;
1361} 1361}
1362 1362
1363void 1363static void
1364lcs_schedule_recovery(struct lcs_card *card) 1364lcs_schedule_recovery(struct lcs_card *card)
1365{ 1365{
1366 LCS_DBF_TEXT(2, trace, "startrec"); 1366 LCS_DBF_TEXT(2, trace, "startrec");
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char
1990 1990
1991} 1991}
1992 1992
1993DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); 1993static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
1994 1994
1995static ssize_t 1995static ssize_t
1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, 1996lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index d7d1cc0a5c8e..3346088f47e0 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2053,7 +2053,7 @@ out_free_ndev:
2053 return ret; 2053 return ret;
2054} 2054}
2055 2055
2056DRIVER_ATTR(connection, 0200, NULL, conn_write); 2056static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2057 2057
2058static ssize_t 2058static ssize_t
2059remove_write (struct device_driver *drv, const char *buf, size_t count) 2059remove_write (struct device_driver *drv, const char *buf, size_t count)
@@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
2112 return -EINVAL; 2112 return -EINVAL;
2113} 2113}
2114 2114
2115DRIVER_ATTR(remove, 0200, NULL, remove_write); 2115static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2116 2116
2117static void 2117static void
2118netiucv_banner(void) 2118netiucv_banner(void)
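The lcs.c and netiucv.c hunks give the same treatment to sysfs attribute objects: DEVICE_ATTR() and DRIVER_ATTR() expand to a struct definition (dev_attr_<name> / driver_attr_<name>), and putting static in front of the macro keeps that object file-local. A sketch with a hypothetical read-only device attribute, matching the show() signature visible in the qeth_sys.c hunks further down:

#include <linux/kernel.h>
#include <linux/device.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "example\n");
}

/* static makes the generated dev_attr_foo object local to this file */
static DEVICE_ATTR(foo, 0444, foo_show, NULL);
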
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c
index 6bb558a9a032..7c735e1fe063 100644
--- a/drivers/s390/net/qeth_eddp.c
+++ b/drivers/s390/net/qeth_eddp.c
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
49 return buffers_needed; 49 return buffers_needed;
50} 50}
51 51
52static inline void 52static void
53qeth_eddp_free_context(struct qeth_eddp_context *ctx) 53qeth_eddp_free_context(struct qeth_eddp_context *ctx)
54{ 54{
55 int i; 55 int i;
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
91 } 91 }
92} 92}
93 93
94static inline int 94static int
95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, 95qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
96 struct qeth_eddp_context *ctx) 96 struct qeth_eddp_context *ctx)
97{ 97{
@@ -196,7 +196,7 @@ out:
196 return flush_cnt; 196 return flush_cnt;
197} 197}
198 198
199static inline void 199static void
200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, 200qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
201 struct qeth_eddp_data *eddp, int data_len) 201 struct qeth_eddp_data *eddp, int data_len)
202{ 202{
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
256 ctx->offset += eddp->thl; 256 ctx->offset += eddp->thl;
257} 257}
258 258
259static inline void 259static void
260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, 260qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
261 __wsum *hcsum) 261 __wsum *hcsum)
262{ 262{
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
302 } 302 }
303} 303}
304 304
305static inline void 305static void
306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, 306qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
307 struct qeth_eddp_data *eddp, int data_len, 307 struct qeth_eddp_data *eddp, int data_len,
308 __wsum hcsum) 308 __wsum hcsum)
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); 349 ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
350} 350}
351 351
352static inline __wsum 352static __wsum
353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) 353qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
354{ 354{
355 __wsum phcsum; /* pseudo header checksum */ 355 __wsum phcsum; /* pseudo header checksum */
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); 363 return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
364} 364}
365 365
366static inline __wsum 366static __wsum
367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) 367qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
368{ 368{
369 __be32 proto; 369 __be32 proto;
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
381 return phcsum; 381 return phcsum;
382} 382}
383 383
384static inline struct qeth_eddp_data * 384static struct qeth_eddp_data *
385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) 385qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
386{ 386{
387 struct qeth_eddp_data *eddp; 387 struct qeth_eddp_data *eddp;
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
399 return eddp; 399 return eddp;
400} 400}
401 401
402static inline void 402static void
403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 403__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
404 struct qeth_eddp_data *eddp) 404 struct qeth_eddp_data *eddp)
405{ 405{
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
464 } 464 }
465} 465}
466 466
467static inline int 467static int
468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, 468qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
469 struct sk_buff *skb, struct qeth_hdr *qhdr) 469 struct sk_buff *skb, struct qeth_hdr *qhdr)
470{ 470{
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
505 return 0; 505 return 0;
506} 506}
507 507
508static inline void 508static void
509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, 509qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
510 int hdr_len) 510 int hdr_len)
511{ 511{
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
529 (skb_shinfo(skb)->gso_segs + 1); 529 (skb_shinfo(skb)->gso_segs + 1);
530} 530}
531 531
532static inline struct qeth_eddp_context * 532static struct qeth_eddp_context *
533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, 533qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
534 int hdr_len) 534 int hdr_len)
535{ 535{
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
581 return ctx; 581 return ctx;
582} 582}
583 583
584static inline struct qeth_eddp_context * 584static struct qeth_eddp_context *
585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, 585qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
586 struct qeth_hdr *qhdr) 586 struct qeth_hdr *qhdr)
587{ 587{
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
625 } 625 }
626 return NULL; 626 return NULL;
627} 627}
628
629
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index d2efa5ff125d..2257e45594b3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
651 return 0; 651 return 0;
652} 652}
653 653
654static inline int 654static int
655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, 655__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
656 int same_type) 656 int same_type)
657{ 657{
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr)
795 return rc; 795 return rc;
796} 796}
797 797
798static inline void 798static void
799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) 799__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags)
800{ 800{
801 struct qeth_ipaddr *addr, *tmp; 801 struct qeth_ipaddr *addr, *tmp;
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *);
882static void qeth_add_multicast_ipv6(struct qeth_card *); 882static void qeth_add_multicast_ipv6(struct qeth_card *);
883#endif 883#endif
884 884
885static inline int 885static int
886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) 886qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread)
887{ 887{
888 unsigned long flags; 888 unsigned long flags;
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
920 wake_up(&card->wait_q); 920 wake_up(&card->wait_q);
921} 921}
922 922
923static inline int 923static int
924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) 924__qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
925{ 925{
926 unsigned long flags; 926 unsigned long flags;
@@ -1764,9 +1764,9 @@ out:
1764 qeth_release_buffer(channel,iob); 1764 qeth_release_buffer(channel,iob);
1765} 1765}
1766 1766
1767static inline void 1767static void
1768qeth_prepare_control_data(struct qeth_card *card, int len, 1768qeth_prepare_control_data(struct qeth_card *card, int len,
1769struct qeth_cmd_buffer *iob) 1769 struct qeth_cmd_buffer *iob)
1770{ 1770{
1771 qeth_setup_ccw(&card->write,iob->data,len); 1771 qeth_setup_ccw(&card->write,iob->data,len);
1772 iob->callback = qeth_release_buffer; 1772 iob->callback = qeth_release_buffer;
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
2160 return 0; 2160 return 0;
2161} 2161}
2162 2162
2163static inline struct sk_buff * 2163static struct sk_buff *
2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) 2164qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2165{ 2165{
2166 struct sk_buff* skb; 2166 struct sk_buff* skb;
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr)
2179 return skb; 2179 return skb;
2180} 2180}
2181 2181
2182static inline struct sk_buff * 2182static struct sk_buff *
2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2183qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer,
2184 struct qdio_buffer_element **__element, int *__offset, 2184 struct qdio_buffer_element **__element, int *__offset,
2185 struct qeth_hdr **hdr) 2185 struct qeth_hdr **hdr)
@@ -2264,7 +2264,7 @@ no_mem:
2264 return NULL; 2264 return NULL;
2265} 2265}
2266 2266
2267static inline __be16 2267static __be16
2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev) 2268qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2269{ 2269{
2270 struct qeth_card *card; 2270 struct qeth_card *card;
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev)
2297 return htons(ETH_P_802_2); 2297 return htons(ETH_P_802_2);
2298} 2298}
2299 2299
2300static inline void 2300static void
2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, 2301qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2302 struct qeth_hdr *hdr) 2302 struct qeth_hdr *hdr)
2303{ 2303{
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb,
2351 fake_llc->ethertype = ETH_P_IP; 2351 fake_llc->ethertype = ETH_P_IP;
2352} 2352}
2353 2353
2354static inline void 2354static void
2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, 2355qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb,
2356 struct qeth_hdr *hdr) 2356 struct qeth_hdr *hdr)
2357{ 2357{
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; 2420 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2421} 2421}
2422 2422
2423static inline __u16 2423static __u16
2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, 2424qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2425 struct qeth_hdr *hdr) 2425 struct qeth_hdr *hdr)
2426{ 2426{
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2476 return vlan_id; 2476 return vlan_id;
2477} 2477}
2478 2478
2479static inline void 2479static void
2480qeth_process_inbound_buffer(struct qeth_card *card, 2480qeth_process_inbound_buffer(struct qeth_card *card,
2481 struct qeth_qdio_buffer *buf, int index) 2481 struct qeth_qdio_buffer *buf, int index)
2482{ 2482{
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card,
2528 } 2528 }
2529} 2529}
2530 2530
2531static inline struct qeth_buffer_pool_entry * 2531static struct qeth_buffer_pool_entry *
2532qeth_get_buffer_pool_entry(struct qeth_card *card) 2532qeth_get_buffer_pool_entry(struct qeth_card *card)
2533{ 2533{
2534 struct qeth_buffer_pool_entry *entry; 2534 struct qeth_buffer_pool_entry *entry;
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card)
2543 return NULL; 2543 return NULL;
2544} 2544}
2545 2545
2546static inline void 2546static void
2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2547qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2548{ 2548{
2549 struct qeth_buffer_pool_entry *pool_entry; 2549 struct qeth_buffer_pool_entry *pool_entry;
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf)
2570 buf->state = QETH_QDIO_BUF_EMPTY; 2570 buf->state = QETH_QDIO_BUF_EMPTY;
2571} 2571}
2572 2572
2573static inline void 2573static void
2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 2574qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2575 struct qeth_qdio_out_buffer *buf) 2575 struct qeth_qdio_out_buffer *buf)
2576{ 2576{
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 2595 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
2596} 2596}
2597 2597
2598static inline void 2598static void
2599qeth_queue_input_buffer(struct qeth_card *card, int index) 2599qeth_queue_input_buffer(struct qeth_card *card, int index)
2600{ 2600{
2601 struct qeth_qdio_q *queue = card->qdio.in_q; 2601 struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status,
2699 card->perf_stats.inbound_start_time; 2699 card->perf_stats.inbound_start_time;
2700} 2700}
2701 2701
2702static inline int 2702static int
2703qeth_handle_send_error(struct qeth_card *card, 2703qeth_handle_send_error(struct qeth_card *card,
2704 struct qeth_qdio_out_buffer *buffer, 2704 struct qeth_qdio_out_buffer *buffer,
2705 unsigned int qdio_err, unsigned int siga_err) 2705 unsigned int qdio_err, unsigned int siga_err)
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
2821 * Switched to packing state if the number of used buffers on a queue 2821 * Switched to packing state if the number of used buffers on a queue
2822 * reaches a certain limit. 2822 * reaches a certain limit.
2823 */ 2823 */
2824static inline void 2824static void
2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) 2825qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2826{ 2826{
2827 if (!queue->do_pack) { 2827 if (!queue->do_pack) {
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
2842 * In that case 1 is returned to inform the caller. If no buffer 2842 * In that case 1 is returned to inform the caller. If no buffer
2843 * has to be flushed, zero is returned. 2843 * has to be flushed, zero is returned.
2844 */ 2844 */
2845static inline int 2845static int
2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) 2846qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2847{ 2847{
2848 struct qeth_qdio_out_buffer *buffer; 2848 struct qeth_qdio_out_buffer *buffer;
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
2877 * Checks if there is a packing buffer and prepares it to be flushed. 2877 * Checks if there is a packing buffer and prepares it to be flushed.
2878 * In that case returns 1, otherwise zero. 2878 * In that case returns 1, otherwise zero.
2879 */ 2879 */
2880static inline int 2880static int
2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) 2881qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2882{ 2882{
2883 struct qeth_qdio_out_buffer *buffer; 2883 struct qeth_qdio_out_buffer *buffer;
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
2894 return 0; 2894 return 0;
2895} 2895}
2896 2896
2897static inline void 2897static void
2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) 2898qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
2899{ 2899{
2900 int index; 2900 int index;
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev,
3594 } 3594 }
3595} 3595}
3596 3596
3597static inline int 3597static int
3598qeth_send_packet(struct qeth_card *, struct sk_buff *); 3598qeth_send_packet(struct qeth_card *, struct sk_buff *);
3599 3599
3600static int 3600static int
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev)
3759 return 0; 3759 return 0;
3760} 3760}
3761 3761
3762static inline int 3762static int
3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) 3763qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3764{ 3764{
3765 int cast_type = RTN_UNSPEC; 3765 int cast_type = RTN_UNSPEC;
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
3806 return cast_type; 3806 return cast_type;
3807} 3807}
3808 3808
3809static inline int 3809static int
3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, 3810qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
3811 int ipv, int cast_type) 3811 int ipv, int cast_type)
3812{ 3812{
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb)
3853 } 3853 }
3854} 3854}
3855 3855
3856static inline struct qeth_hdr * 3856static struct qeth_hdr *
3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) 3857__qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3858{ 3858{
3859#ifdef CONFIG_QETH_VLAN 3859#ifdef CONFIG_QETH_VLAN
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv)
3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); 3882 qeth_push_skb(card, skb, sizeof(struct qeth_hdr)));
3883} 3883}
3884 3884
3885static inline void 3885static void
3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) 3886__qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb)
3887{ 3887{
3888 if (orig_skb != new_skb) 3888 if (orig_skb != new_skb)
3889 dev_kfree_skb_any(new_skb); 3889 dev_kfree_skb_any(new_skb);
3890} 3890}
3891 3891
3892static inline struct sk_buff * 3892static struct sk_buff *
3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, 3893qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
3894 struct qeth_hdr **hdr, int ipv) 3894 struct qeth_hdr **hdr, int ipv)
3895{ 3895{
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type)
3940 return ct | QETH_CAST_UNICAST; 3940 return ct | QETH_CAST_UNICAST;
3941} 3941}
3942 3942
3943static inline void 3943static void
3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, 3944qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3945 struct sk_buff *skb) 3945 struct sk_buff *skb)
3946{ 3946{
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
3977 } 3977 }
3978} 3978}
3979 3979
3980static inline void 3980static void
3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, 3981qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
3982 struct sk_buff *skb, int cast_type) 3982 struct sk_buff *skb, int cast_type)
3983{ 3983{
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
4068 } 4068 }
4069} 4069}
4070 4070
4071static inline void 4071static void
4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, 4072__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4073 int is_tso, int *next_element_to_fill) 4073 int is_tso, int *next_element_to_fill)
4074{ 4074{
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
4112 *next_element_to_fill = element; 4112 *next_element_to_fill = element;
4113} 4113}
4114 4114
4115static inline int 4115static int
4116qeth_fill_buffer(struct qeth_qdio_out_q *queue, 4116qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4117 struct qeth_qdio_out_buffer *buf, 4117 struct qeth_qdio_out_buffer *buf,
4118 struct sk_buff *skb) 4118 struct sk_buff *skb)
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue,
4171 return flush_cnt; 4171 return flush_cnt;
4172} 4172}
4173 4173
4174static inline int 4174static int
4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4175qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4176 struct sk_buff *skb, struct qeth_hdr *hdr, 4176 struct sk_buff *skb, struct qeth_hdr *hdr,
4177 int elements_needed, 4177 int elements_needed,
@@ -4222,7 +4222,7 @@ out:
4222 return -EBUSY; 4222 return -EBUSY;
4223} 4223}
4224 4224
4225static inline int 4225static int
4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, 4226qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4227 struct sk_buff *skb, struct qeth_hdr *hdr, 4227 struct sk_buff *skb, struct qeth_hdr *hdr,
4228 int elements_needed, struct qeth_eddp_context *ctx) 4228 int elements_needed, struct qeth_eddp_context *ctx)
@@ -4328,7 +4328,7 @@ out:
4328 return rc; 4328 return rc;
4329} 4329}
4330 4330
4331static inline int 4331static int
4332qeth_get_elements_no(struct qeth_card *card, void *hdr, 4332qeth_get_elements_no(struct qeth_card *card, void *hdr,
4333 struct sk_buff *skb, int elems) 4333 struct sk_buff *skb, int elems)
4334{ 4334{
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr,
4349} 4349}
4350 4350
4351 4351
4352static inline int 4352static int
4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) 4353qeth_send_packet(struct qeth_card *card, struct sk_buff *skb)
4354{ 4354{
4355 int ipv = 0; 4355 int ipv = 0;
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4536} 4536}
4537 4537
4538 4538
4539static inline const char * 4539static const char *
4540qeth_arp_get_error_cause(int *rc) 4540qeth_arp_get_error_cause(int *rc)
4541{ 4541{
4542 switch (*rc) { 4542 switch (*rc) {
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries)
4597 return rc; 4597 return rc;
4598} 4598}
4599 4599
4600static inline void 4600static void
4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, 4601qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
4602 struct qeth_arp_query_data *qdata, 4602 struct qeth_arp_query_data *qdata,
4603 int entry_size, int uentry_size) 4603 int entry_size, int uentry_size)
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5214 spin_unlock_irqrestore(&card->vlanlock, flags); 5214 spin_unlock_irqrestore(&card->vlanlock, flags);
5215} 5215}
5216 5216
5217static inline void 5217static void
5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, 5218qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf,
5219 unsigned short vid) 5219 unsigned short vid)
5220{ 5220{
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card)
5625 spin_unlock_irqrestore(&card->ip_lock, flags); 5625 spin_unlock_irqrestore(&card->ip_lock, flags);
5626} 5626}
5627 5627
5628static inline void 5628static void
5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) 5629qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev)
5630{ 5630{
5631 struct qeth_ipaddr *ipm; 5631 struct qeth_ipaddr *ipm;
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card)
5711} 5711}
5712 5712
5713#ifdef CONFIG_QETH_IPV6 5713#ifdef CONFIG_QETH_IPV6
5714static inline void 5714static void
5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) 5715qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev)
5716{ 5716{
5717 struct qeth_ipaddr *ipm; 5717 struct qeth_ipaddr *ipm;
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
6022 6022
6023 return rc; 6023 return rc;
6024} 6024}
6025static inline void 6025static void
6026qeth_fill_netmask(u8 *netmask, unsigned int len) 6026qeth_fill_netmask(u8 *netmask, unsigned int len)
6027{ 6027{
6028 int i,j; 6028 int i,j;
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode)
6626 return rc; 6626 return rc;
6627} 6627}
6628 6628
6629static inline int 6629static int
6630qeth_setadapter_hstr(struct qeth_card *card) 6630qeth_setadapter_hstr(struct qeth_card *card)
6631{ 6631{
6632 int rc; 6632 int rc;
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card,
6889 return rc; 6889 return rc;
6890} 6890}
6891 6891
6892static inline int 6892static int
6893qeth_start_ipa_arp_processing(struct qeth_card *card) 6893qeth_start_ipa_arp_processing(struct qeth_card *card)
6894{ 6894{
6895 int rc; 6895 int rc;
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
7529 wake_up(&card->wait_q); 7529 wake_up(&card->wait_q);
7530} 7530}
7531 7531
7532static inline int 7532static int
7533qeth_threads_running(struct qeth_card *card, unsigned long threads) 7533qeth_threads_running(struct qeth_card *card, unsigned long threads)
7534{ 7534{
7535 unsigned long flags; 7535 unsigned long flags;
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto,
8118 spin_unlock_irqrestore(&card->ip_lock, flags); 8118 spin_unlock_irqrestore(&card->ip_lock, flags);
8119} 8119}
8120 8120
8121static inline void 8121static void
8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) 8122qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
8123{ 8123{
8124 int i, j; 8124 int i, j;
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c
index 5836737ac58f..d518419cd0c6 100644
--- a/drivers/s390/net/qeth_sys.c
+++ b/drivers/s390/net/qeth_sys.c
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c
328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, 328static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
329 qeth_dev_bufcnt_store); 329 qeth_dev_bufcnt_store);
330 330
331static inline ssize_t 331static ssize_t
332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, 332qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route,
333 char *buf) 333 char *buf)
334{ 334{
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu
368 return qeth_dev_route_show(card, &card->options.route4, buf); 368 return qeth_dev_route_show(card, &card->options.route4, buf);
369} 369}
370 370
371static inline ssize_t 371static ssize_t
372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, 372qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route,
373 enum qeth_prot_versions prot, const char *buf, size_t count) 373 enum qeth_prot_versions prot, const char *buf, size_t count)
374{ 374{
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \
998 .store = _store, \ 998 .store = _store, \
999}; 999};
1000 1000
1001int 1001static int
1002qeth_check_layer2(struct qeth_card *card) 1002qeth_check_layer2(struct qeth_card *card)
1003{ 1003{
1004 if (card->options.layer2) 1004 if (card->options.layer2)
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
1100 qeth_dev_ipato_invert4_show, 1100 qeth_dev_ipato_invert4_show,
1101 qeth_dev_ipato_invert4_store); 1101 qeth_dev_ipato_invert4_store);
1102 1102
1103static inline ssize_t 1103static ssize_t
1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, 1104qeth_dev_ipato_add_show(char *buf, struct qeth_card *card,
1105 enum qeth_prot_versions proto) 1105 enum qeth_prot_versions proto)
1106{ 1106{
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char
1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); 1146 return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
1147} 1147}
1148 1148
1149static inline int 1149static int
1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, 1150qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1151 u8 *addr, int *mask_bits) 1151 u8 *addr, int *mask_bits)
1152{ 1152{
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static inline ssize_t 1181static ssize_t
1182qeth_dev_ipato_add_store(const char *buf, size_t count, 1182qeth_dev_ipato_add_store(const char *buf, size_t count,
1183 struct qeth_card *card, enum qeth_prot_versions proto) 1183 struct qeth_card *card, enum qeth_prot_versions proto)
1184{ 1184{
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
1223 qeth_dev_ipato_add4_show, 1223 qeth_dev_ipato_add4_show,
1224 qeth_dev_ipato_add4_store); 1224 qeth_dev_ipato_add4_store);
1225 1225
1226static inline ssize_t 1226static ssize_t
1227qeth_dev_ipato_del_store(const char *buf, size_t count, 1227qeth_dev_ipato_del_store(const char *buf, size_t count,
1228 struct qeth_card *card, enum qeth_prot_versions proto) 1228 struct qeth_card *card, enum qeth_prot_versions proto)
1229{ 1229{
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = {
1361 .attrs = (struct attribute **)qeth_ipato_device_attrs, 1361 .attrs = (struct attribute **)qeth_ipato_device_attrs,
1362}; 1362};
1363 1363
1364static inline ssize_t 1364static ssize_t
1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, 1365qeth_dev_vipa_add_show(char *buf, struct qeth_card *card,
1366 enum qeth_prot_versions proto) 1366 enum qeth_prot_versions proto)
1367{ 1367{
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char
1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); 1407 return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
1408} 1408}
1409 1409
1410static inline int 1410static int
1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, 1411qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1412 u8 *addr) 1412 u8 *addr)
1413{ 1413{
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto,
1418 return 0; 1418 return 0;
1419} 1419}
1420 1420
1421static inline ssize_t 1421static ssize_t
1422qeth_dev_vipa_add_store(const char *buf, size_t count, 1422qeth_dev_vipa_add_store(const char *buf, size_t count,
1423 struct qeth_card *card, enum qeth_prot_versions proto) 1423 struct qeth_card *card, enum qeth_prot_versions proto)
1424{ 1424{
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
1451 qeth_dev_vipa_add4_show, 1451 qeth_dev_vipa_add4_show,
1452 qeth_dev_vipa_add4_store); 1452 qeth_dev_vipa_add4_store);
1453 1453
1454static inline ssize_t 1454static ssize_t
1455qeth_dev_vipa_del_store(const char *buf, size_t count, 1455qeth_dev_vipa_del_store(const char *buf, size_t count,
1456 struct qeth_card *card, enum qeth_prot_versions proto) 1456 struct qeth_card *card, enum qeth_prot_versions proto)
1457{ 1457{
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = {
1542 .attrs = (struct attribute **)qeth_vipa_device_attrs, 1542 .attrs = (struct attribute **)qeth_vipa_device_attrs,
1543}; 1543};
1544 1544
1545static inline ssize_t 1545static ssize_t
1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, 1546qeth_dev_rxip_add_show(char *buf, struct qeth_card *card,
1547 enum qeth_prot_versions proto) 1547 enum qeth_prot_versions proto)
1548{ 1548{
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char
1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); 1588 return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
1589} 1589}
1590 1590
1591static inline int 1591static int
1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, 1592qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1593 u8 *addr) 1593 u8 *addr)
1594{ 1594{
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto,
1599 return 0; 1599 return 0;
1600} 1600}
1601 1601
1602static inline ssize_t 1602static ssize_t
1603qeth_dev_rxip_add_store(const char *buf, size_t count, 1603qeth_dev_rxip_add_store(const char *buf, size_t count,
1604 struct qeth_card *card, enum qeth_prot_versions proto) 1604 struct qeth_card *card, enum qeth_prot_versions proto)
1605{ 1605{
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
1632 qeth_dev_rxip_add4_show, 1632 qeth_dev_rxip_add4_show,
1633 qeth_dev_rxip_add4_store); 1633 qeth_dev_rxip_add4_store);
1634 1634
1635static inline ssize_t 1635static ssize_t
1636qeth_dev_rxip_del_store(const char *buf, size_t count, 1636qeth_dev_rxip_del_store(const char *buf, size_t count,
1637 struct qeth_card *card, enum qeth_prot_versions proto) 1637 struct qeth_card *card, enum qeth_prot_versions proto)
1638{ 1638{
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c
index e088b5e28711..806bb1a921eb 100644
--- a/drivers/s390/s390mach.c
+++ b/drivers/s390/s390mach.c
@@ -13,22 +13,18 @@
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/workqueue.h> 14#include <linux/workqueue.h>
15#include <linux/time.h> 15#include <linux/time.h>
16#include <linux/device.h>
16#include <linux/kthread.h> 17#include <linux/kthread.h>
17 18#include <asm/etr.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19 20#include <asm/cio.h>
21#include "cio/cio.h"
22#include "cio/chsc.h"
23#include "cio/css.h"
20#include "s390mach.h" 24#include "s390mach.h"
21 25
22static struct semaphore m_sem; 26static struct semaphore m_sem;
23 27
24extern int css_process_crw(int, int);
25extern int chsc_process_crw(void);
26extern int chp_process_crw(int, int);
27extern void css_reiterate_subchannels(void);
28
29extern struct workqueue_struct *slow_path_wq;
30extern struct work_struct slow_path_work;
31
32static NORET_TYPE void 28static NORET_TYPE void
33s390_handle_damage(char *msg) 29s390_handle_damage(char *msg)
34{ 30{
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs)
470 s390_handle_damage("unable to revalidate registers."); 466 s390_handle_damage("unable to revalidate registers.");
471 } 467 }
472 468
469 if (mci->cd) {
470 /* Timing facility damage */
471 s390_handle_damage("TOD clock damaged");
472 }
473
474 if (mci->ed && mci->ec) {
475 /* External damage */
476 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
477 etr_sync_check();
478 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
479 etr_switch_to_local();
480 }
481
473 if (mci->se) 482 if (mci->se)
474 /* Storage error uncorrected */ 483 /* Storage error uncorrected */
475 s390_handle_damage("received storage error uncorrected " 484 s390_handle_damage("received storage error uncorrected "
@@ -508,7 +517,7 @@ static int
508machine_check_init(void) 517machine_check_init(void)
509{ 518{
510 init_MUTEX_LOCKED(&m_sem); 519 init_MUTEX_LOCKED(&m_sem);
511 ctl_clear_bit(14, 25); /* disable external damage MCH */ 520 ctl_set_bit(14, 25); /* enable external damage MCH */
512 ctl_set_bit(14, 27); /* enable system recovery MCH */ 521 ctl_set_bit(14, 27); /* enable system recovery MCH */
513#ifdef CONFIG_MACHCHK_WARNING 522#ifdef CONFIG_MACHCHK_WARNING
514 ctl_set_bit(14, 24); /* enable warning MCH */ 523 ctl_set_bit(14, 24); /* enable warning MCH */
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init);
529static int __init 538static int __init
530machine_check_crw_init (void) 539machine_check_crw_init (void)
531{ 540{
532 kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); 541 struct task_struct *task;
542
543 task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
544 if (IS_ERR(task))
545 return PTR_ERR(task);
533 ctl_set_bit(14, 28); /* enable channel report MCH */ 546 ctl_set_bit(14, 28); /* enable channel report MCH */
534 return 0; 547 return 0;
535} 548}
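In machine_check_crw_init(), the return value of kthread_run() is now checked before channel-report machine checks are enabled. kthread_run() reports failure through an ERR_PTR-encoded pointer rather than NULL, so IS_ERR()/PTR_ERR() is the correct test. A reduced sketch of the pattern (the example_* names are placeholders, not kernel symbols):

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/err.h>

static int example_collect_fn(void *data)
{
        return 0;
}

static int __init example_init(void)
{
        struct task_struct *task;

        task = kthread_run(example_collect_fn, NULL, "kexample");
        if (IS_ERR(task))               /* e.g. ERR_PTR(-ENOMEM); never NULL */
                return PTR_ERR(task);
        /* only enable the hardware side once the consumer thread exists */
        return 0;
}
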
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h
index 7abb42a09ae2..d3ca4281a494 100644
--- a/drivers/s390/s390mach.h
+++ b/drivers/s390/s390mach.h
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw )
102 return ccode; 102 return ccode;
103} 103}
104 104
105#define ED_ETR_SYNC 12 /* External damage ETR sync check */
106#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
107
105#endif /* __s390mach */ 108#endif /* __s390mach */
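The new ED_ETR_SYNC and ED_ETR_SWITCH constants are bit positions within the external damage code that the machine-check handler reads from the lowcore; s390_do_machine_check() tests them with a 1U << shift before calling the ETR routines. A minimal sketch of that decode over a plain 32-bit damage word (the handle_* functions below stand in for etr_sync_check()/etr_switch_to_local()):

#define ED_ETR_SYNC     12      /* external damage: ETR sync check */
#define ED_ETR_SWITCH   13      /* external damage: ETR switch to local */

static void handle_etr_sync(void) { }
static void handle_etr_switch(void) { }

static void decode_external_damage(unsigned int damage_code)
{
        if (damage_code & (1U << ED_ETR_SYNC))
                handle_etr_sync();
        if (damage_code & (1U << ED_ETR_SWITCH))
                handle_etr_switch();
}
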
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 85093b71f9fa..39a885266790 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void);
47static void zfcp_ns_gid_pn_handler(unsigned long); 47static void zfcp_ns_gid_pn_handler(unsigned long);
48 48
49/* miscellaneous */ 49/* miscellaneous */
50static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); 50static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t);
51static inline void zfcp_sg_list_free(struct zfcp_sg_list *); 51static void zfcp_sg_list_free(struct zfcp_sg_list *);
52static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, 52static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *,
53 void __user *, size_t); 53 void __user *, size_t);
54static inline int zfcp_sg_list_copy_to_user(void __user *, 54static int zfcp_sg_list_copy_to_user(void __user *,
55 struct zfcp_sg_list *, size_t); 55 struct zfcp_sg_list *, size_t);
56
57static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); 56static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long);
58 57
59#define ZFCP_CFDC_IOC_MAGIC 0xDD 58#define ZFCP_CFDC_IOC_MAGIC 0xDD
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command,
605 * elements of the scatter-gather list. The maximum size of a single element 604 * elements of the scatter-gather list. The maximum size of a single element
606 * in the scatter-gather list is PAGE_SIZE. 605 * in the scatter-gather list is PAGE_SIZE.
607 */ 606 */
608static inline int 607static int
609zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) 608zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
610{ 609{
611 struct scatterlist *sg; 610 struct scatterlist *sg;
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size)
652 * Memory for each element in the scatter-gather list is freed. 651 * Memory for each element in the scatter-gather list is freed.
653 * Finally sg_list->sg is freed itself and sg_list->count is reset. 652 * Finally sg_list->sg is freed itself and sg_list->count is reset.
654 */ 653 */
655static inline void 654static void
656zfcp_sg_list_free(struct zfcp_sg_list *sg_list) 655zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
657{ 656{
658 struct scatterlist *sg; 657 struct scatterlist *sg;
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
697 * @size: number of bytes to be copied 696 * @size: number of bytes to be copied
698 * Return: 0 on success, -EFAULT if copy_from_user fails. 697 * Return: 0 on success, -EFAULT if copy_from_user fails.
699 */ 698 */
700static inline int 699static int
701zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, 700zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
702 void __user *user_buffer, 701 void __user *user_buffer,
703 size_t size) 702 size_t size)
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list,
735 * @size: number of bytes to be copied 734 * @size: number of bytes to be copied
736 * Return: 0 on success, -EFAULT if copy_to_user fails 735 * Return: 0 on success, -EFAULT if copy_to_user fails
737 */ 736 */
738static inline int 737static int
739zfcp_sg_list_copy_to_user(void __user *user_buffer, 738zfcp_sg_list_copy_to_user(void __user *user_buffer,
740 struct zfcp_sg_list *sg_list, 739 struct zfcp_sg_list *sg_list,
741 size_t size) 740 size_t size)
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = {
1799 * @code: reason code 1798 * @code: reason code
1800 * @rc_table: table of reason codes and descriptions 1799 * @rc_table: table of reason codes and descriptions
1801 */ 1800 */
1802static inline const char * 1801static const char *
1803zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) 1802zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table)
1804{ 1803{
1805 const char *descr = "unknown reason code"; 1804 const char *descr = "unknown reason code";
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt)
1847 * @rjt_par: reject parameter acc. to FC-PH/FC-FS 1846 * @rjt_par: reject parameter acc. to FC-PH/FC-FS
1848 * @rc_table: table of reason codes and descriptions 1847 * @rc_table: table of reason codes and descriptions
1849 */ 1848 */
1850static inline void 1849static void
1851zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, 1850zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par,
1852 const struct zfcp_rc_entry *rc_table) 1851 const struct zfcp_rc_entry *rc_table)
1853{ 1852{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0aa3b1ac76af..d8191d115c14 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize,
31 31
32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER 32#define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER
33 33
34static inline int 34static int
35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) 35zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck)
36{ 36{
37 unsigned long long sec; 37 unsigned long long sec;
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label,
106 return len; 106 return len;
107} 107}
108 108
109static inline int 109static int
110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, 110zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
111 debug_entry_t * entry, char *out_buf) 111 debug_entry_t * entry, char *out_buf)
112{ 112{
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area,
130 return len; 130 return len;
131} 131}
132 132
133inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) 133void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
134{ 134{
135 struct zfcp_adapter *adapter = fsf_req->adapter; 135 struct zfcp_adapter *adapter = fsf_req->adapter;
136 struct fsf_qtcb *qtcb = fsf_req->qtcb; 136 struct fsf_qtcb *qtcb = fsf_req->qtcb;
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 241 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
242} 242}
243 243
244inline void 244void
245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, 245zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
246 struct fsf_status_read_buffer *status_buffer) 246 struct fsf_status_read_buffer *status_buffer)
247{ 247{
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 295 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
296} 296}
297 297
298inline void 298void
299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, 299zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
300 unsigned int qdio_error, unsigned int siga_error, 300 unsigned int qdio_error, unsigned int siga_error,
301 int sbal_index, int sbal_count) 301 int sbal_index, int sbal_count)
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); 316 spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
317} 317}
318 318
319static inline int 319static int
320zfcp_hba_dbf_view_response(char *out_buf, 320zfcp_hba_dbf_view_response(char *out_buf,
321 struct zfcp_hba_dbf_record_response *rec) 321 struct zfcp_hba_dbf_record_response *rec)
322{ 322{
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf,
403 return len; 403 return len;
404} 404}
405 405
406static inline int 406static int
407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) 407zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
408{ 408{
409 int len = 0; 409 int len = 0;
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec)
424 return len; 424 return len;
425} 425}
426 426
427static inline int 427static int
428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) 428zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec)
429{ 429{
430 int len = 0; 430 int len = 0;
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view,
469 return len; 469 return len;
470} 470}
471 471
472struct debug_view zfcp_hba_dbf_view = { 472static struct debug_view zfcp_hba_dbf_view = {
473 "structured", 473 "structured",
474 NULL, 474 NULL,
475 &zfcp_dbf_view_header, 475 &zfcp_dbf_view_header,
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481inline void 481void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 519 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
520} 520}
521 521
522inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) 522void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
523{ 523{
524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 524 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
525 struct zfcp_port *port = ct->port; 525 struct zfcp_port *port = ct->port;
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
531 ct->req->length); 531 ct->req->length);
532} 532}
533 533
534inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) 534void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
535{ 535{
536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; 536 struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
537 struct zfcp_port *port = ct->port; 537 struct zfcp_port *port = ct->port;
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
543 ct->resp->length); 543 ct->resp->length);
544} 544}
545 545
546static inline void 546static void
547_zfcp_san_dbf_event_common_els(const char *tag, int level, 547_zfcp_san_dbf_event_common_els(const char *tag, int level,
548 struct zfcp_fsf_req *fsf_req, u32 s_id, 548 struct zfcp_fsf_req *fsf_req, u32 s_id,
549 u32 d_id, u8 ls_code, void *buffer, int buflen) 549 u32 d_id, u8 ls_code, void *buffer, int buflen)
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level,
585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); 585 spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
586} 586}
587 587
588inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) 588void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
589{ 589{
590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 590 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
591 591
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
597 els->req->length); 597 els->req->length);
598} 598}
599 599
600inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) 600void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
601{ 601{
602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; 602 struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
603 603
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
608 els->resp->length); 608 els->resp->length);
609} 609}
610 610
611inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) 611void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
612{ 612{
613 struct zfcp_adapter *adapter = fsf_req->adapter; 613 struct zfcp_adapter *adapter = fsf_req->adapter;
614 struct fsf_status_read_buffer *status_buffer = 614 struct fsf_status_read_buffer *status_buffer =
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view,
693 return len; 693 return len;
694} 694}
695 695
696struct debug_view zfcp_san_dbf_view = { 696static struct debug_view zfcp_san_dbf_view = {
697 "structured", 697 "structured",
698 NULL, 698 NULL,
699 &zfcp_dbf_view_header, 699 &zfcp_dbf_view_header,
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = {
702 NULL 702 NULL
703}; 703};
704 704
705static inline void 705static void
706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, 706_zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
707 struct zfcp_adapter *adapter, 707 struct zfcp_adapter *adapter,
708 struct scsi_cmnd *scsi_cmnd, 708 struct scsi_cmnd *scsi_cmnd,
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level,
786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); 786 spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
787} 787}
788 788
789inline void 789void
790zfcp_scsi_dbf_event_result(const char *tag, int level, 790zfcp_scsi_dbf_event_result(const char *tag, int level,
791 struct zfcp_adapter *adapter, 791 struct zfcp_adapter *adapter,
792 struct scsi_cmnd *scsi_cmnd, 792 struct scsi_cmnd *scsi_cmnd,
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level,
796 adapter, scsi_cmnd, fsf_req, 0); 796 adapter, scsi_cmnd, fsf_req, 0);
797} 797}
798 798
799inline void 799void
800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, 800zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
801 struct scsi_cmnd *scsi_cmnd, 801 struct scsi_cmnd *scsi_cmnd,
802 struct zfcp_fsf_req *new_fsf_req, 802 struct zfcp_fsf_req *new_fsf_req,
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
806 adapter, scsi_cmnd, new_fsf_req, old_req_id); 806 adapter, scsi_cmnd, new_fsf_req, old_req_id);
807} 807}
808 808
809inline void 809void
810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, 810zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
811 struct scsi_cmnd *scsi_cmnd) 811 struct scsi_cmnd *scsi_cmnd)
812{ 812{
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view,
884 return len; 884 return len;
885} 885}
886 886
887struct debug_view zfcp_scsi_dbf_view = { 887static struct debug_view zfcp_scsi_dbf_view = {
888 "structured", 888 "structured",
889 NULL, 889 NULL,
890 &zfcp_dbf_view_header, 890 &zfcp_dbf_view_header,
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c88babce9bca..88642dec080c 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
200 * returns: 0 - initiated action successfully 200 * returns: 0 - initiated action successfully
201 * <0 - failed to initiate action 201 * <0 - failed to initiate action
202 */ 202 */
203int 203static int
204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) 204zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask)
205{ 205{
206 int retval; 206 int retval;
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask)
295 * zfcp_erp_adisc - send ADISC ELS command 295 * zfcp_erp_adisc - send ADISC ELS command
296 * @port: port structure 296 * @port: port structure
297 */ 297 */
298int 298static int
299zfcp_erp_adisc(struct zfcp_port *port) 299zfcp_erp_adisc(struct zfcp_port *port)
300{ 300{
301 struct zfcp_adapter *adapter = port->adapter; 301 struct zfcp_adapter *adapter = port->adapter;
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port)
380 * 380 *
381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. 381 * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered.
382 */ 382 */
383void 383static void
384zfcp_erp_adisc_handler(unsigned long data) 384zfcp_erp_adisc_handler(unsigned long data)
385{ 385{
386 struct zfcp_send_els *send_els; 386 struct zfcp_send_els *send_els;
@@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3141 break; 3141 break;
3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER: 3142 case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
3143 if (result != ZFCP_ERP_SUCCEEDED) { 3143 if (result != ZFCP_ERP_SUCCEEDED) {
3144 struct zfcp_port *port;
3145 list_for_each_entry(port, &adapter->port_list_head, list) 3144 list_for_each_entry(port, &adapter->port_list_head, list)
3146 if (port->rport && 3145 if (port->rport &&
3147 !atomic_test_mask(ZFCP_STATUS_PORT_WKA, 3146 !atomic_test_mask(ZFCP_STATUS_PORT_WKA,
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index b8794d77285d..cda0cc095ad1 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); 119extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); 120extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t);
121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); 121extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *);
122extern void set_host_byte(u32 *, char); 122extern void set_host_byte(int *, char);
123extern void set_driver_byte(u32 *, char); 123extern void set_driver_byte(int *, char);
124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); 124extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *);
125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); 125extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *);
126 126
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 067f1519eb04..4b3ae3f22e78 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags,
4563/* 4563/*
4564 * set qtcb pointer in fsf_req and initialize QTCB 4564 * set qtcb pointer in fsf_req and initialize QTCB
4565 */ 4565 */
4566static inline void 4566static void
4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4567zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4568{ 4568{
4569 if (likely(fsf_req->qtcb != NULL)) { 4569 if (likely(fsf_req->qtcb != NULL)) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index dbd9f48e863e..1e12a78e8edd 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -21,22 +21,22 @@
21 21
22#include "zfcp_ext.h" 22#include "zfcp_ext.h"
23 23
24static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); 24static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int);
25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get 25static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get
26 (struct zfcp_qdio_queue *, int, int); 26 (struct zfcp_qdio_queue *, int, int);
27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp 27static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp
28 (struct zfcp_fsf_req *, int, int); 28 (struct zfcp_fsf_req *, int, int);
29static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain 29static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain
30 (struct zfcp_fsf_req *, unsigned long); 30 (struct zfcp_fsf_req *, unsigned long);
31static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next 31static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next
32 (struct zfcp_fsf_req *, unsigned long); 32 (struct zfcp_fsf_req *, unsigned long);
33static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); 33static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int);
34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); 34static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *);
35static inline void zfcp_qdio_sbale_fill 35static void zfcp_qdio_sbale_fill
36 (struct zfcp_fsf_req *, unsigned long, void *, int); 36 (struct zfcp_fsf_req *, unsigned long, void *, int);
37static inline int zfcp_qdio_sbals_from_segment 37static int zfcp_qdio_sbals_from_segment
38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); 38 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long);
39static inline int zfcp_qdio_sbals_from_buffer 39static int zfcp_qdio_sbals_from_buffer
40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); 40 (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int);
41 41
42static qdio_handler_t zfcp_qdio_request_handler; 42static qdio_handler_t zfcp_qdio_request_handler;
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter)
201 * returns: error flag 201 * returns: error flag
202 * 202 *
203 */ 203 */
204static inline int 204static int
205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, 205zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status,
206 unsigned int qdio_error, unsigned int siga_error, 206 unsigned int qdio_error, unsigned int siga_error,
207 int first_element, int elements_processed) 207 int first_element, int elements_processed)
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale)
462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for 462 * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for
463 * a struct zfcp_fsf_req 463 * a struct zfcp_fsf_req
464 */ 464 */
465inline volatile struct qdio_buffer_element * 465volatile struct qdio_buffer_element *
466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) 466zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
467{ 467{
468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, 468 return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue,
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale)
484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for 484 * zfcp_qdio_sbale_curr - return current SBALE on request_queue for
485 * a struct zfcp_fsf_req 485 * a struct zfcp_fsf_req
486 */ 486 */
487inline volatile struct qdio_buffer_element * 487volatile struct qdio_buffer_element *
488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) 488zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
489{ 489{
490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 490 return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr,
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req)
499 * 499 *
500 * Note: We can assume at least one free SBAL in the request_queue when called. 500 * Note: We can assume at least one free SBAL in the request_queue when called.
501 */ 501 */
502static inline void 502static void
503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) 503zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
504{ 504{
505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count); 505 int count = atomic_read(&fsf_req->adapter->request_queue.free_count);
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
517 * 517 *
518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. 518 * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req.
519 */ 519 */
520static inline volatile struct qdio_buffer_element * 520static volatile struct qdio_buffer_element *
521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 521zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
522{ 522{
523 volatile struct qdio_buffer_element *sbale; 523 volatile struct qdio_buffer_element *sbale;
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
554/** 554/**
555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed 555 * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed
556 */ 556 */
557static inline volatile struct qdio_buffer_element * 557static volatile struct qdio_buffer_element *
558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) 558zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
559{ 559{
560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) 560 if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue 569 * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue
570 * with zero from 570 * with zero from
571 */ 571 */
572static inline int 572static int
573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) 573zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last)
574{ 574{
575 struct qdio_buffer **buf = queue->buffer; 575 struct qdio_buffer **buf = queue->buffer;
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req)
603 * zfcp_qdio_sbale_fill - set address and length in current SBALE 603 * zfcp_qdio_sbale_fill - set address and length in current SBALE
604 * on request_queue 604 * on request_queue
605 */ 605 */
606static inline void 606static void
607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 607zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
608 void *addr, int length) 608 void *addr, int length)
609{ 609{
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
624 * Alignment and length of the segment determine how many SBALEs are needed 624 * Alignment and length of the segment determine how many SBALEs are needed
625 * for the memory segment. 625 * for the memory segment.
626 */ 626 */
627static inline int 627static int
628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 628zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
629 void *start_addr, unsigned long total_length) 629 void *start_addr, unsigned long total_length)
630{ 630{
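The kernel-doc above notes that alignment and length of a memory segment determine how many SBALEs it needs. A minimal stand-alone sketch of that arithmetic, assuming page-sized SBALE chunks (PAGE_SIZE and the helper name are illustrative, not taken from the patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed SBALE chunk size, for illustration only */

/* The offset of the start address within its page plus the total length,
 * rounded up to whole pages, gives the number of SBALEs required. */
static unsigned long sbales_needed(unsigned long start_addr, unsigned long length)
{
	unsigned long offset = start_addr & (PAGE_SIZE - 1);

	return (offset + length + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	/* A 10 KiB segment starting 100 bytes into a page touches 3 pages,
	 * hence 3 SBALEs in this model. */
	printf("%lu\n", sbales_needed(0x1000 + 100, 10 * 1024));
	return 0;
}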
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
659 * @sg_count: number of elements in scatter-gather list 659 * @sg_count: number of elements in scatter-gather list
660 * @max_sbals: upper bound for number of SBALs to be used 660 * @max_sbals: upper bound for number of SBALs to be used
661 */ 661 */
662inline int 662int
663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 663zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
664 struct scatterlist *sg, int sg_count, int max_sbals) 664 struct scatterlist *sg, int sg_count, int max_sbals)
665{ 665{
@@ -707,7 +707,7 @@ out:
707 * @length: length of buffer 707 * @length: length of buffer
708 * @max_sbals: upper bound for number of SBALs to be used 708 * @max_sbals: upper bound for number of SBALs to be used
709 */ 709 */
710static inline int 710static int
711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, 711zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
712 void *buffer, unsigned long length, int max_sbals) 712 void *buffer, unsigned long length, int max_sbals)
713{ 713{
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used 728 * @scsi_cmnd: either scatter-gather list or buffer contained herein is used
729 * to fill SBALs 729 * to fill SBALs
730 */ 730 */
731inline int 731int
732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, 732zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req,
733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) 733 unsigned long sbtype, struct scsi_cmnd *scsi_cmnd)
734{ 734{
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 452d96f92a14..99db02062c3b 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
90 return fcp_sns_info_ptr; 90 return fcp_sns_info_ptr;
91} 91}
92 92
93fcp_dl_t * 93static fcp_dl_t *
94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) 94zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd)
95{ 95{
96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2; 96 int additional_length = fcp_cmd->add_fcp_cdb_length << 2;
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl)
124 * regarding the specified byte 124 * regarding the specified byte
125 */ 125 */
126static inline void 126static inline void
127set_byte(u32 * result, char status, char pos) 127set_byte(int *result, char status, char pos)
128{ 128{
129 *result |= status << (pos * 8); 129 *result |= status << (pos * 8);
130} 130}
131 131
132void 132void
133set_host_byte(u32 * result, char status) 133set_host_byte(int *result, char status)
134{ 134{
135 set_byte(result, status, 2); 135 set_byte(result, status, 2);
136} 136}
137 137
138void 138void
139set_driver_byte(u32 * result, char status) 139set_driver_byte(int *result, char status)
140{ 140{
141 set_byte(result, status, 3); 141 set_byte(result, status, 3);
142} 142}
@@ -280,7 +280,7 @@ out:
280 return retval; 280 return retval;
281} 281}
282 282
283void 283static void
284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) 284zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt)
285{ 285{
286 struct completion *wait = (struct completion *) scpnt->SCp.ptr; 286 struct completion *wait = (struct completion *) scpnt->SCp.ptr;
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt,
324 * returns: 0 - success, SCSI command enqueued 324 * returns: 0 - success, SCSI command enqueued
325 * !0 - failure 325 * !0 - failure
326 */ 326 */
327int 327static int
328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, 328zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
329 void (*done) (struct scsi_cmnd *)) 329 void (*done) (struct scsi_cmnd *))
330{ 330{
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id,
380 * will handle late commands. (Usually, the normal completion of late 380 * will handle late commands. (Usually, the normal completion of late
381 * commands is ignored with respect to the running abort operation.) 381 * commands is ignored with respect to the running abort operation.)
382 */ 382 */
383int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) 383static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
384{ 384{
385 struct Scsi_Host *scsi_host; 385 struct Scsi_Host *scsi_host;
386 struct zfcp_adapter *adapter; 386 struct zfcp_adapter *adapter;
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
445 return retval; 445 return retval;
446} 446}
447 447
448int 448static int
449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) 449zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
450{ 450{
451 int retval; 451 int retval;
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
541/** 541/**
542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset 542 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
543 */ 543 */
544int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) 544static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
545{ 545{
546 struct zfcp_unit *unit; 546 struct zfcp_unit *unit;
547 struct zfcp_adapter *adapter; 547 struct zfcp_adapter *adapter;
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 1e788e815ce7..090743d2f914 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -9,8 +9,14 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/proc_fs.h> 10#include <linux/proc_fs.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h>
12#include <asm/ebcdic.h> 13#include <asm/ebcdic.h>
13 14
15/* Sigh, math-emu. Don't ask. */
16#include <asm/sfp-util.h>
17#include <math-emu/soft-fp.h>
18#include <math-emu/single.h>
19
14struct sysinfo_1_1_1 { 20struct sysinfo_1_1_1 {
15 char reserved_0[32]; 21 char reserved_0[32];
16 char manufacturer[16]; 22 char manufacturer[16];
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
198 * if the higher order 8 bits are not zero. Printing 204 * if the higher order 8 bits are not zero. Printing
199 * a floating point number in the kernel is a no-no, 205 * a floating point number in the kernel is a no-no,
200 * always print the number as 32 bit unsigned integer. 206 * always print the number as 32 bit unsigned integer.
201 * The user-space needs to know about the stange 207 * The user-space needs to know about the strange
202 * encoding of the alternate cpu capability. 208 * encoding of the alternate cpu capability.
203 */ 209 */
204 len += sprintf(page + len, "Capability: %u %u\n", 210 len += sprintf(page + len, "Capability: %u %u\n",
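Since the kernel prints the capability field as a raw 32-bit unsigned integer, user space has to undo the encoding itself. A hypothetical user-space sketch of that decoding, mirroring the (value >> 23) check used elsewhere in this patch:

#include <stdio.h>
#include <string.h>

/* Decode a raw "Capability:" value: if the high-order 9 bits are zero it is
 * a plain unsigned integer, otherwise the 32 bits form an IEEE
 * single-precision float. */
static double decode_capability(unsigned int raw)
{
	float f;

	if ((raw >> 23) == 0)
		return (double) raw;
	memcpy(&f, &raw, sizeof(f));	/* reinterpret the bit pattern */
	return (double) f;
}

int main(void)
{
	printf("%g\n", decode_capability(1000));	/* integer encoding */
	printf("%g\n", decode_capability(0x44fa0000));	/* float encoding: 2000 */
	return 0;
}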
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void)
351 357
352__initcall(create_proc_sysinfo); 358__initcall(create_proc_sysinfo);
353 359
360/*
361 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
362 */
363void s390_adjust_jiffies(void)
364{
365 struct sysinfo_1_2_2 *info;
366 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
367 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
368 FP_DECL_EX;
369 unsigned int capability;
370
371 info = (void *) get_zeroed_page(GFP_KERNEL);
372 if (!info)
373 return;
374
375 if (stsi(info, 1, 2, 2) != -ENOSYS) {
376 /*
377 * Major sigh. The cpu capability encoding is "special".
378 * If the first 9 bits of info->capability are 0 then it
379 * is a 32 bit unsigned integer in the range 0 .. 2^23.
380 * If the first 9 bits are != 0 then it is a 32 bit float.
381 * In addition a lower value indicates a proportionally
382 * higher cpu capacity. Bogomips are the other way round.
383 * To get to a halfway suitable number we divide 1e7
384 * by the cpu capability number. Yes, that means a floating
385 * point division .. math-emu here we come :-)
386 */
387 FP_UNPACK_SP(SA, &fmil);
388 if ((info->capability >> 23) == 0)
389 FP_FROM_INT_S(SB, info->capability, 32, int);
390 else
391 FP_UNPACK_SP(SB, &info->capability);
392 FP_DIV_S(SR, SA, SB);
393 FP_TO_INT_S(capability, SR, 32, 0);
394 } else
395 /*
396 * Really old machine without stsi block for basic
397 * cpu information. Report 42.0 bogomips.
398 */
399 capability = 42;
400 loops_per_jiffy = capability * (500000/HZ);
401 free_page((unsigned long) info);
402}
403
404/*
405 * calibrate the delay loop
406 */
407void __init calibrate_delay(void)
408{
409 s390_adjust_jiffies();
410 /* Print the good old Bogomips line .. */
411 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
412 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
413 (loops_per_jiffy/(5000/HZ)) % 100);
414}