Diffstat (limited to 'drivers/s390')
 drivers/s390/block/dasd.c      |  45
 drivers/s390/block/dasd_eckd.c |  81
 drivers/s390/block/dasd_fba.c  |   2
 drivers/s390/block/dasd_int.h  |   2
 drivers/s390/char/tape.h       |   1
 drivers/s390/char/tape_3590.c  |  29
 drivers/s390/char/tape_3590.h  |   4
 drivers/s390/char/tape_core.c  |   3
 drivers/s390/cio/qdio.c        | 240
 drivers/s390/cio/qdio.h        |  52
 drivers/s390/net/qeth.h        |   3
 drivers/s390/net/qeth_main.c   |  77
 drivers/s390/net/qeth_mpc.h    |   1
 drivers/s390/scsi/zfcp_erp.c   |   2
 drivers/s390/scsi/zfcp_fsf.c   |  10
 15 files changed, 415 insertions(+), 137 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e71929db8b06..977521013fe8 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2174,6 +2174,51 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
 	return ret;
 }
 
+struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device,
+					     void *rdc_buffer,
+					     int rdc_buffer_size, char *magic)
+{
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+
+	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate RDC request");
+		return cqr;
+	}
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = CCW_CMD_RDC;
+	ccw->cda = (__u32)(addr_t)rdc_buffer;
+	ccw->count = rdc_buffer_size;
+
+	cqr->device = device;
+	cqr->expires = 10*HZ;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 2;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+
+int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
+				void **rdc_buffer, int rdc_buffer_size)
+{
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
+				     magic);
+	if (IS_ERR(cqr))
+		return PTR_ERR(cqr);
+
+	ret = dasd_sleep_on(cqr);
+	dasd_sfree_request(cqr, cqr->device);
+	return ret;
+}
 
 static int __init
 dasd_init(void)
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cecab2274a6e..c9583fbc2a7d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -450,6 +450,81 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
 	return 0;
 }
 
+struct dasd_ccw_req * dasd_eckd_build_rcd_lpm(struct dasd_device *device,
+					      void *rcd_buffer,
+					      struct ciw *ciw, __u8 lpm)
+{
+	struct dasd_ccw_req *cqr;
+	struct ccw1 *ccw;
+
+	cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
+
+	if (IS_ERR(cqr)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "Could not allocate RCD request");
+		return cqr;
+	}
+
+	ccw = cqr->cpaddr;
+	ccw->cmd_code = ciw->cmd;
+	ccw->cda = (__u32)(addr_t)rcd_buffer;
+	ccw->count = ciw->count;
+
+	cqr->device = device;
+	cqr->expires = 10*HZ;
+	cqr->lpm = lpm;
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 2;
+	cqr->buildclk = get_clock();
+	cqr->status = DASD_CQR_FILLED;
+	return cqr;
+}
+
+static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
+				   void **rcd_buffer,
+				   int *rcd_buffer_size, __u8 lpm)
+{
+	struct ciw *ciw;
+	char *rcd_buf = NULL;
+	int ret;
+	struct dasd_ccw_req *cqr;
+
+	/*
+	 * scan for RCD command in extended SenseID data
+	 */
+	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd == 0) {
+		ret = -EOPNOTSUPP;
+		goto out_error;
+	}
+	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf) {
+		ret = -ENOMEM;
+		goto out_error;
+	}
+	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
+	if (IS_ERR(cqr)) {
+		ret = PTR_ERR(cqr);
+		goto out_error;
+	}
+	ret = dasd_sleep_on(cqr);
+	/*
+	 * on success we update the user input parms
+	 */
+	dasd_sfree_request(cqr, cqr->device);
+	if (ret)
+		goto out_error;
+
+	*rcd_buffer_size = ciw->count;
+	*rcd_buffer = rcd_buf;
+	return 0;
+out_error:
+	kfree(rcd_buf);
+	*rcd_buffer = NULL;
+	*rcd_buffer_size = 0;
+	return ret;
+}
+
 static int
 dasd_eckd_read_conf(struct dasd_device *device)
 {
@@ -469,8 +544,8 @@ dasd_eckd_read_conf(struct dasd_device *device)
 	/* get configuration data per operational path */
 	for (lpm = 0x80; lpm; lpm>>= 1) {
 		if (lpm & path_data->opm){
-			rc = read_conf_data_lpm(device->cdev, &conf_data,
-						&conf_len, lpm);
+			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+						     &conf_len, lpm);
 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
 				MESSAGE(KERN_WARNING,
 					"Read configuration data returned "
@@ -639,7 +714,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 	/* Read Device Characteristics */
 	rdc_data = (void *) &(private->rdc_data);
 	memset(rdc_data, 0, sizeof(rdc_data));
-	rc = read_dev_chars(device->cdev, &rdc_data, 64);
+	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
 	if (rc)
 		DEV_MESSAGE(KERN_WARNING, device,
 			    "Read device characteristics returned "
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index be0909e39226..da16ead8aff2 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -135,7 +135,7 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	}
 	/* Read Device Characteristics */
 	rdc_data = (void *) &(private->rdc_data);
-	rc = read_dev_chars(device->cdev, &rdc_data, 32);
+	rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
 	if (rc) {
 		DEV_MESSAGE(KERN_WARNING, device,
 			    "Read device characteristics returned error %d",
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a2cc69e11410..241294cba415 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -509,6 +509,8 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
 int dasd_generic_set_offline (struct ccw_device *cdev);
 int dasd_generic_notify(struct ccw_device *, int);
 
+int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
+
 /* externals in dasd_devmap.c */
 extern int dasd_max_devindex;
 extern int dasd_probeonly;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index bb4ff537729d..3b52f5c1dbef 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -103,6 +103,7 @@ enum tape_op {
 	TO_CRYPT_OFF,	/* Disable encrpytion */
 	TO_KEKL_SET,	/* Set KEK label */
 	TO_KEKL_QUERY,	/* Query KEK label */
+	TO_RDC,		/* Read device characteristics */
 	TO_SIZE,	/* #entries in tape_op_t */
 };
 
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 50f5edab83d7..7e2b2ab49264 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -788,6 +788,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request)
 	case TO_SIZE:
 	case TO_KEKL_SET:
 	case TO_KEKL_QUERY:
+	case TO_RDC:
 		break;
 	}
 	return TAPE_IO_SUCCESS;
@@ -1549,6 +1550,26 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
 	return TAPE_IO_STOP;
 }
 
+
+static int tape_3590_read_dev_chars(struct tape_device *device,
+				    struct tape_3590_rdc_data *rdc_data)
+{
+	int rc;
+	struct tape_request *request;
+
+	request = tape_alloc_request(1, sizeof(*rdc_data));
+	if (IS_ERR(request))
+		return PTR_ERR(request);
+	request->op = TO_RDC;
+	tape_ccw_end(request->cpaddr, CCW_CMD_RDC, sizeof(*rdc_data),
+		     request->cpdata);
+	rc = tape_do_io(device, request);
+	if (rc == 0)
+		memcpy(rdc_data, request->cpdata, sizeof(*rdc_data));
+	tape_free_request(request);
+	return rc;
+}
+
 /*
  * Setup device function
  */
@@ -1557,7 +1578,7 @@ tape_3590_setup_device(struct tape_device *device)
 {
 	int rc;
 	struct tape_3590_disc_data *data;
-	char *rdc_data;
+	struct tape_3590_rdc_data *rdc_data;
 
 	DBF_EVENT(6, "3590 device setup\n");
 	data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA);
@@ -1566,12 +1587,12 @@ tape_3590_setup_device(struct tape_device *device)
 	data->read_back_op = READ_PREVIOUS;
 	device->discdata = data;
 
-	rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA);
+	rdc_data = kmalloc(sizeof(*rdc_data), GFP_KERNEL | GFP_DMA);
 	if (!rdc_data) {
 		rc = -ENOMEM;
 		goto fail_kmalloc;
 	}
-	rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64);
+	rc = tape_3590_read_dev_chars(device, rdc_data);
 	if (rc) {
 		DBF_LH(3, "Read device characteristics failed!\n");
 		goto fail_kmalloc;
@@ -1579,7 +1600,7 @@ tape_3590_setup_device(struct tape_device *device)
 	rc = tape_std_assign(device);
 	if (rc)
 		goto fail_rdc_data;
-	if (rdc_data[31] == 0x13) {
+	if (rdc_data->data[31] == 0x13) {
 		PRINT_INFO("Device has crypto support\n");
 		data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK;
 		tape_3592_disable_crypt(device);
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index aa5138807af1..4534055f1376 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -129,6 +129,10 @@ struct tape_3590_med_sense {
 	char pad2[116];
 } __attribute__ ((packed));
 
+struct tape_3590_rdc_data {
+	char data[64];
+} __attribute__ ((packed));
+
 /* Datastructures for 3592 encryption support */
 
 struct tape3592_kekl {
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index e2a8a1a04bab..2fae6338ee1c 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -73,7 +73,7 @@ const char *tape_op_verbose[TO_SIZE] =
 	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
 	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
 	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
-	[TO_KEKL_QUERY] = "KLQ",
+	[TO_KEKL_QUERY] = "KLQ",	[TO_RDC] = "RDC",
 };
 
 static int
@@ -911,6 +911,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
 	case TO_ASSIGN:
 	case TO_UNASSIGN:
 	case TO_READ_ATTMSG:
+	case TO_RDC:
 		if (device->tape_state == TS_INIT)
 			break;
 		if (device->tape_state == TS_UNUSED)
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 05fac0733f3d..cba64e4cfcd4 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -69,7 +69,6 @@ static const char version[] = "QDIO base support version 2";
 
 static int qdio_performance_stats = 0;
 static int proc_perf_file_registration;
-static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
 static struct qdio_perf_stats perf_stats;
 
 static int hydra_thinints;
@@ -111,6 +110,31 @@ qdio_min(int a,int b)
 }
 
 /***************** SCRUBBER HELPER ROUTINES **********************/
+#ifdef CONFIG_64BIT
+static inline void qdio_perf_stat_inc(atomic64_t *count)
+{
+	if (qdio_performance_stats)
+		atomic64_inc(count);
+}
+
+static inline void qdio_perf_stat_dec(atomic64_t *count)
+{
+	if (qdio_performance_stats)
+		atomic64_dec(count);
+}
+#else /* CONFIG_64BIT */
+static inline void qdio_perf_stat_inc(atomic_t *count)
+{
+	if (qdio_performance_stats)
+		atomic_inc(count);
+}
+
+static inline void qdio_perf_stat_dec(atomic_t *count)
+{
+	if (qdio_performance_stats)
+		atomic_dec(count);
+}
+#endif /* CONFIG_64BIT */
 
 static inline __u64
 qdio_get_micros(void)
@@ -277,8 +301,7 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
 	QDIO_DBF_TEXT4(0,trace,"sigasync");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-	if (qdio_performance_stats)
-		perf_stats.siga_syncs++;
+	qdio_perf_stat_inc(&perf_stats.siga_syncs);
 
 	cc = do_siga_sync(q->schid, gpr2, gpr3);
 	if (cc)
@@ -323,8 +346,7 @@ qdio_siga_output(struct qdio_q *q)
 	__u32 busy_bit;
 	__u64 start_time=0;
 
-	if (qdio_performance_stats)
-		perf_stats.siga_outs++;
+	qdio_perf_stat_inc(&perf_stats.siga_outs);
 
 	QDIO_DBF_TEXT4(0,trace,"sigaout");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,8 +380,7 @@ qdio_siga_input(struct qdio_q *q)
 	QDIO_DBF_TEXT4(0,trace,"sigain");
 	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
 
-	if (qdio_performance_stats)
-		perf_stats.siga_ins++;
+	qdio_perf_stat_inc(&perf_stats.siga_ins);
 
 	cc = do_siga_input(q->schid, q->mask);
 
@@ -953,8 +974,7 @@ __qdio_outbound_processing(struct qdio_q *q)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-		if (qdio_performance_stats)
-			o_p_c++;
+		qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
 		/* as we're sissies, we'll check next time */
 		if (likely(!atomic_read(&q->is_in_shutdown))) {
 			qdio_mark_q(q);
@@ -962,10 +982,8 @@ __qdio_outbound_processing(struct qdio_q *q)
 		}
 		return;
 	}
-	if (qdio_performance_stats) {
-		o_p_nc++;
-		perf_stats.tl_runs++;
-	}
+	qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
+	qdio_perf_stat_inc(&perf_stats.tl_runs);
 
 	/* see comment in qdio_kick_outbound_q */
 	siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -1139,17 +1157,6 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
 {
 	int i;
 
-	static int old_pcis=0;
-	static int old_thinints=0;
-
-	if (qdio_performance_stats) {
-		if ((old_pcis==perf_stats.pcis)&&
-		    (old_thinints==perf_stats.thinints))
-			perf_stats.start_time_inbound=NOW;
-		else
-			old_pcis=perf_stats.pcis;
-	}
-
 	i=qdio_get_inbound_buffer_frontier(q);
 	if ( (i!=GET_SAVED_FRONTIER(q)) ||
 	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
@@ -1337,10 +1344,7 @@ qdio_kick_inbound_handler(struct qdio_q *q)
 	q->siga_error=0;
 	q->error_status_flags=0;
 
-	if (qdio_performance_stats) {
-		perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
-		perf_stats.inbound_cnt++;
-	}
+	qdio_perf_stat_inc(&perf_stats.inbound_cnt);
 }
 
 static void
@@ -1360,8 +1364,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 	 */
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-		if (qdio_performance_stats)
-			ii_p_c++;
+		qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
 		/*
 		 * as we might just be about to stop polling, we make
 		 * sure that we check again at least once more
@@ -1369,8 +1372,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 		tiqdio_sched_tl();
 		return;
 	}
-	if (qdio_performance_stats)
-		ii_p_nc++;
+	qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
 	if (unlikely(atomic_read(&q->is_in_shutdown))) {
 		qdio_unmark_q(q);
 		goto out;
@@ -1412,8 +1414,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
 	for (i=0;i<irq_ptr->no_output_qs;i++) {
 		oq = irq_ptr->output_qs[i];
 		if (!qdio_is_outbound_q_done(oq)) {
-			if (qdio_performance_stats)
-				perf_stats.tl_runs--;
+			qdio_perf_stat_dec(&perf_stats.tl_runs);
 			__qdio_outbound_processing(oq);
 		}
 	}
@@ -1452,8 +1453,7 @@ __qdio_inbound_processing(struct qdio_q *q)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-		if (qdio_performance_stats)
-			i_p_c++;
+		qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
 		/* as we're sissies, we'll check next time */
 		if (likely(!atomic_read(&q->is_in_shutdown))) {
 			qdio_mark_q(q);
@@ -1461,10 +1461,8 @@ __qdio_inbound_processing(struct qdio_q *q)
 		}
 		return;
 	}
-	if (qdio_performance_stats) {
-		i_p_nc++;
-		perf_stats.tl_runs++;
-	}
+	qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
+	qdio_perf_stat_inc(&perf_stats.tl_runs);
 
 again:
 	if (qdio_has_inbound_q_moved(q)) {
@@ -1510,8 +1508,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
 
 	if (unlikely(qdio_reserve_q(q))) {
 		qdio_release_q(q);
-		if (qdio_performance_stats)
-			ii_p_c++;
+		qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
 		/*
 		 * as we might just be about to stop polling, we make
 		 * sure that we check again at least once more
@@ -1602,8 +1599,7 @@ tiqdio_tl(unsigned long data)
 {
 	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
 
-	if (qdio_performance_stats)
-		perf_stats.tl_runs++;
+	qdio_perf_stat_inc(&perf_stats.tl_runs);
 
 	tiqdio_inbound_checks();
 }
@@ -1914,10 +1910,7 @@ tiqdio_thinint_handler(void)
 {
 	QDIO_DBF_TEXT4(0,trace,"thin_int");
 
-	if (qdio_performance_stats) {
-		perf_stats.thinints++;
-		perf_stats.start_time_inbound=NOW;
-	}
+	qdio_perf_stat_inc(&perf_stats.thinints);
 
 	/* SVS only when needed:
 	 * issue SVS to benefit from iqdio interrupt avoidance
@@ -1972,17 +1965,12 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
 	int i;
 	struct qdio_q *q;
 
-	if (qdio_performance_stats) {
-		perf_stats.pcis++;
-		perf_stats.start_time_inbound=NOW;
-	}
+	qdio_perf_stat_inc(&perf_stats.pcis);
 	for (i=0;i<irq_ptr->no_input_qs;i++) {
 		q=irq_ptr->input_qs[i];
 		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
 			qdio_mark_q(q);
 		else {
-			if (qdio_performance_stats)
-				perf_stats.tl_runs--;
 			__qdio_inbound_processing(q);
 		}
 	}
@@ -1992,8 +1980,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
 		q=irq_ptr->output_qs[i];
 		if (qdio_is_outbound_q_done(q))
 			continue;
-		if (qdio_performance_stats)
-			perf_stats.tl_runs--;
+		qdio_perf_stat_dec(&perf_stats.tl_runs);
 		if (!irq_ptr->sync_done_on_outb_pcis)
 			SYNC_MEMORY;
 		__qdio_outbound_processing(q);
@@ -3463,18 +3450,12 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
 	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
 
 	/* This is the outbound handling of queues */
-	if (qdio_performance_stats)
-		perf_stats.start_time_outbound=NOW;
-
 	qdio_do_qdio_fill_output(q,qidx,count,buffers);
 
 	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
 
 	if (callflags&QDIO_FLAG_DONT_SIGA) {
-		if (qdio_performance_stats) {
-			perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
-			perf_stats.outbound_cnt++;
-		}
+		qdio_perf_stat_inc(&perf_stats.outbound_cnt);
 		return;
 	}
 	if (q->is_iqdio_q) {
@@ -3504,8 +3485,7 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
 			qdio_kick_outbound_q(q);
 		} else {
 			QDIO_DBF_TEXT3(0,trace, "fast-req");
-			if (qdio_performance_stats)
-				perf_stats.fast_reqs++;
+			qdio_perf_stat_inc(&perf_stats.fast_reqs);
 		}
 	}
 	/*
@@ -3516,10 +3496,7 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
 		__qdio_outbound_processing(q);
 	}
 
-	if (qdio_performance_stats) {
-		perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
-		perf_stats.outbound_cnt++;
-	}
+	qdio_perf_stat_inc(&perf_stats.outbound_cnt);
 }
 
 /* count must be 1 in iqdio */
@@ -3589,33 +3566,67 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
 		return 0;
 
 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
-	_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
-	_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
-	_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
-	_OUTP_IT("Number of tasklet runs (total) : %lu\n",
-		 perf_stats.tl_runs);
+#ifdef CONFIG_64BIT
+	_OUTP_IT("Number of tasklet runs (total) : %li\n",
+		 (long)atomic64_read(&perf_stats.tl_runs));
+	_OUTP_IT("Inbound tasklet runs tried/retried : %li/%li\n",
+		 (long)atomic64_read(&perf_stats.inbound_tl_runs),
+		 (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
+	_OUTP_IT("Inbound-thin tasklet runs tried/retried : %li/%li\n",
+		 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
+		 (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
+	_OUTP_IT("Outbound tasklet runs tried/retried : %li/%li\n",
+		 (long)atomic64_read(&perf_stats.outbound_tl_runs),
+		 (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
 	_OUTP_IT("\n");
-	_OUTP_IT("Number of SIGA sync's issued : %lu\n",
-		 perf_stats.siga_syncs);
-	_OUTP_IT("Number of SIGA in's issued : %lu\n",
-		 perf_stats.siga_ins);
-	_OUTP_IT("Number of SIGA out's issued : %lu\n",
-		 perf_stats.siga_outs);
-	_OUTP_IT("Number of PCIs caught : %lu\n",
-		 perf_stats.pcis);
-	_OUTP_IT("Number of adapter interrupts caught : %lu\n",
-		 perf_stats.thinints);
-	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
-		 perf_stats.fast_reqs);
+	_OUTP_IT("Number of SIGA sync's issued : %li\n",
+		 (long)atomic64_read(&perf_stats.siga_syncs));
+	_OUTP_IT("Number of SIGA in's issued : %li\n",
+		 (long)atomic64_read(&perf_stats.siga_ins));
+	_OUTP_IT("Number of SIGA out's issued : %li\n",
+		 (long)atomic64_read(&perf_stats.siga_outs));
+	_OUTP_IT("Number of PCIs caught : %li\n",
+		 (long)atomic64_read(&perf_stats.pcis));
+	_OUTP_IT("Number of adapter interrupts caught : %li\n",
+		 (long)atomic64_read(&perf_stats.thinints));
+	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %li\n",
+		 (long)atomic64_read(&perf_stats.fast_reqs));
 	_OUTP_IT("\n");
-	_OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
-		 perf_stats.inbound_time);
-	_OUTP_IT("Number of inbound transfers : %lu\n",
-		 perf_stats.inbound_cnt);
-	_OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
-		 perf_stats.outbound_time);
-	_OUTP_IT("Number of do_QDIOs outbound : %lu\n",
-		 perf_stats.outbound_cnt);
+	_OUTP_IT("Number of inbound transfers : %li\n",
+		 (long)atomic64_read(&perf_stats.inbound_cnt));
+	_OUTP_IT("Number of do_QDIOs outbound : %li\n",
+		 (long)atomic64_read(&perf_stats.outbound_cnt));
+#else /* CONFIG_64BIT */
+	_OUTP_IT("Number of tasklet runs (total) : %i\n",
+		 atomic_read(&perf_stats.tl_runs));
+	_OUTP_IT("Inbound tasklet runs tried/retried : %i/%i\n",
+		 atomic_read(&perf_stats.inbound_tl_runs),
+		 atomic_read(&perf_stats.inbound_tl_runs_resched));
+	_OUTP_IT("Inbound-thin tasklet runs tried/retried : %i/%i\n",
+		 atomic_read(&perf_stats.inbound_thin_tl_runs),
+		 atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
+	_OUTP_IT("Outbound tasklet runs tried/retried : %i/%i\n",
+		 atomic_read(&perf_stats.outbound_tl_runs),
+		 atomic_read(&perf_stats.outbound_tl_runs_resched));
+	_OUTP_IT("\n");
+	_OUTP_IT("Number of SIGA sync's issued : %i\n",
+		 atomic_read(&perf_stats.siga_syncs));
+	_OUTP_IT("Number of SIGA in's issued : %i\n",
+		 atomic_read(&perf_stats.siga_ins));
+	_OUTP_IT("Number of SIGA out's issued : %i\n",
+		 atomic_read(&perf_stats.siga_outs));
+	_OUTP_IT("Number of PCIs caught : %i\n",
+		 atomic_read(&perf_stats.pcis));
+	_OUTP_IT("Number of adapter interrupts caught : %i\n",
+		 atomic_read(&perf_stats.thinints));
+	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %i\n",
+		 atomic_read(&perf_stats.fast_reqs));
+	_OUTP_IT("\n");
+	_OUTP_IT("Number of inbound transfers : %i\n",
+		 atomic_read(&perf_stats.inbound_cnt));
+	_OUTP_IT("Number of do_QDIOs outbound : %i\n",
+		 atomic_read(&perf_stats.outbound_cnt));
+#endif /* CONFIG_64BIT */
 	_OUTP_IT("\n");
 
 	return c;
@@ -3642,8 +3653,6 @@ qdio_add_procfs_entry(void)
 static void
 qdio_remove_procfs_entry(void)
 {
-	perf_stats.tl_runs=0;
-
 	if (!proc_perf_file_registration) /* means if it went ok earlier */
 		remove_proc_entry(QDIO_PERF,&proc_root);
 }
@@ -3671,13 +3680,38 @@ qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count
 		qdio_performance_stats = i;
 		if (i==0) {
 			/* reset perf. stat. info */
-			i_p_nc = 0;
-			i_p_c = 0;
-			ii_p_nc = 0;
-			ii_p_c = 0;
-			o_p_nc = 0;
-			o_p_c = 0;
-			memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
+#ifdef CONFIG_64BIT
+			atomic64_set(&perf_stats.tl_runs, 0);
+			atomic64_set(&perf_stats.outbound_tl_runs, 0);
+			atomic64_set(&perf_stats.inbound_tl_runs, 0);
+			atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
+			atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
+			atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
+				     0);
+			atomic64_set(&perf_stats.siga_outs, 0);
+			atomic64_set(&perf_stats.siga_ins, 0);
+			atomic64_set(&perf_stats.siga_syncs, 0);
+			atomic64_set(&perf_stats.pcis, 0);
+			atomic64_set(&perf_stats.thinints, 0);
+			atomic64_set(&perf_stats.fast_reqs, 0);
+			atomic64_set(&perf_stats.outbound_cnt, 0);
+			atomic64_set(&perf_stats.inbound_cnt, 0);
+#else /* CONFIG_64BIT */
+			atomic_set(&perf_stats.tl_runs, 0);
+			atomic_set(&perf_stats.outbound_tl_runs, 0);
+			atomic_set(&perf_stats.inbound_tl_runs, 0);
+			atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
+			atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
+			atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
+			atomic_set(&perf_stats.siga_outs, 0);
+			atomic_set(&perf_stats.siga_ins, 0);
+			atomic_set(&perf_stats.siga_syncs, 0);
+			atomic_set(&perf_stats.pcis, 0);
+			atomic_set(&perf_stats.thinints, 0);
+			atomic_set(&perf_stats.fast_reqs, 0);
+			atomic_set(&perf_stats.outbound_cnt, 0);
+			atomic_set(&perf_stats.inbound_cnt, 0);
+#endif /* CONFIG_64BIT */
 		}
 	} else {
 		QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ec9af72b2afc..2895392eaae4 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -406,21 +406,43 @@ do_clear_global_summary(void)
 #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS	0x04
 
 struct qdio_perf_stats {
-	unsigned long tl_runs;
-
-	unsigned long siga_outs;
-	unsigned long siga_ins;
-	unsigned long siga_syncs;
-	unsigned long pcis;
-	unsigned long thinints;
-	unsigned long fast_reqs;
+#ifdef CONFIG_64BIT
+	atomic64_t tl_runs;
+	atomic64_t outbound_tl_runs;
+	atomic64_t outbound_tl_runs_resched;
+	atomic64_t inbound_tl_runs;
+	atomic64_t inbound_tl_runs_resched;
+	atomic64_t inbound_thin_tl_runs;
+	atomic64_t inbound_thin_tl_runs_resched;
 
-	__u64 start_time_outbound;
-	unsigned long outbound_cnt;
-	unsigned long outbound_time;
-	__u64 start_time_inbound;
-	unsigned long inbound_cnt;
-	unsigned long inbound_time;
+	atomic64_t siga_outs;
+	atomic64_t siga_ins;
+	atomic64_t siga_syncs;
+	atomic64_t pcis;
+	atomic64_t thinints;
+	atomic64_t fast_reqs;
+
+	atomic64_t outbound_cnt;
+	atomic64_t inbound_cnt;
+#else /* CONFIG_64BIT */
+	atomic_t tl_runs;
+	atomic_t outbound_tl_runs;
+	atomic_t outbound_tl_runs_resched;
+	atomic_t inbound_tl_runs;
+	atomic_t inbound_tl_runs_resched;
+	atomic_t inbound_thin_tl_runs;
+	atomic_t inbound_thin_tl_runs_resched;
+
+	atomic_t siga_outs;
+	atomic_t siga_ins;
+	atomic_t siga_syncs;
+	atomic_t pcis;
+	atomic_t thinints;
+	atomic_t fast_reqs;
+
+	atomic_t outbound_cnt;
+	atomic_t inbound_cnt;
+#endif /* CONFIG_64BIT */
 };
 
 /* unlikely as the later the better */
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 84b108d7c7fd..b34eb82edd98 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -288,6 +288,7 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func)
  */
 #define IF_NAME_LEN		16
 #define QETH_TX_TIMEOUT		100 * HZ
+#define QETH_RCD_TIMEOUT	60 * HZ
 #define QETH_HEADER_SIZE	32
 #define MAX_PORTNO		15
 #define QETH_FAKE_LL_LEN_ETH	ETH_HLEN
@@ -582,6 +583,8 @@ enum qeth_channel_states {
 	CH_STATE_ACTIVATING,
 	CH_STATE_HALTED,
 	CH_STATE_STOPPED,
+	CH_STATE_RCD,
+	CH_STATE_RCD_DONE,
 };
 /**
  * card state machine
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index ad7792dc1a04..6fd8870551d3 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -315,7 +315,8 @@ qeth_alloc_card(void)
 }
 
 static long
-__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+__qeth_check_irb_error(struct ccw_device *cdev, unsigned long intparm,
+		       struct irb *irb)
 {
 	if (!IS_ERR(irb))
 		return 0;
@@ -330,6 +331,14 @@ __qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb)
 		PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id);
 		QETH_DBF_TEXT(trace, 2, "ckirberr");
 		QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT);
+		if (intparm == QETH_RCD_PARM) {
+			struct qeth_card *card = CARD_FROM_CDEV(cdev);
+
+			if (card && (card->data.ccwdev == cdev)) {
+				card->data.state = CH_STATE_DOWN;
+				wake_up(&card->wait_q);
+			}
+		}
 		break;
 	default:
 		PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb),
@@ -401,7 +410,7 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 
 	QETH_DBF_TEXT(trace,5,"irq");
 
-	if (__qeth_check_irb_error(cdev, irb))
+	if (__qeth_check_irb_error(cdev, intparm, irb))
 		return;
 	cstat = irb->scsw.cstat;
 	dstat = irb->scsw.dstat;
@@ -429,7 +438,8 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			channel->state = CH_STATE_HALTED;
 
 		/*let's wake up immediately on data channel*/
-		if ((channel == &card->data) && (intparm != 0))
+		if ((channel == &card->data) && (intparm != 0) &&
+		    (intparm != QETH_RCD_PARM))
 			goto out;
 
 	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
@@ -453,6 +463,10 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 			HEXDUMP16(WARN,"irb: ",irb);
 			HEXDUMP16(WARN,"sense data: ",irb->ecw);
 		}
+		if (intparm == QETH_RCD_PARM) {
+			channel->state = CH_STATE_DOWN;
+			goto out;
+		}
 		rc = qeth_get_problem(cdev,irb);
 		if (rc) {
 			qeth_schedule_recovery(card);
@@ -460,6 +474,10 @@ qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
 		}
 	}
 
+	if (intparm == QETH_RCD_PARM) {
+		channel->state = CH_STATE_RCD_DONE;
+		goto out;
+	}
 	if (intparm) {
 		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
 		buffer->state = BUF_STATE_PROCESSED;
@@ -1204,6 +1222,54 @@ qeth_probe_device(struct ccwgroup_device *gdev)
 }
 
 
+static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
+			       int *length)
+{
+	struct ciw *ciw;
+	char *rcd_buf;
+	int ret;
+	struct qeth_channel *channel = &card->data;
+	unsigned long flags;
+
+	/*
+	 * scan for RCD command in extended SenseID data
+	 */
+	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
+	if (!ciw || ciw->cmd == 0)
+		return -EOPNOTSUPP;
+	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
+	if (!rcd_buf)
+		return -ENOMEM;
+
+	channel->ccw.cmd_code = ciw->cmd;
+	channel->ccw.cda = (__u32) __pa (rcd_buf);
+	channel->ccw.count = ciw->count;
+	channel->ccw.flags = CCW_FLAG_SLI;
+	channel->state = CH_STATE_RCD;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+				       QETH_RCD_PARM, LPM_ANYPATH, 0,
+				       QETH_RCD_TIMEOUT);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (!ret)
+		wait_event(card->wait_q,
+			   (channel->state == CH_STATE_RCD_DONE ||
+			    channel->state == CH_STATE_DOWN));
+	if (channel->state == CH_STATE_DOWN)
+		ret = -EIO;
+	else
+		channel->state = CH_STATE_DOWN;
+	if (ret) {
+		kfree(rcd_buf);
+		*buffer = NULL;
+		*length = 0;
+	} else {
+		*length = ciw->count;
+		*buffer = rcd_buf;
+	}
+	return ret;
+}
+
 static int
 qeth_get_unitaddr(struct qeth_card *card)
 {
@@ -1212,9 +1278,9 @@ qeth_get_unitaddr(struct qeth_card *card)
 	int rc;
 
 	QETH_DBF_TEXT(setup, 2, "getunit");
-	rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length);
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
 	if (rc) {
-		PRINT_ERR("read_conf_data for device %s returned %i\n",
+		PRINT_ERR("qeth_read_conf_data for device %s returned %i\n",
 			  CARD_DDEV_ID(card), rc);
 		return rc;
 	}
@@ -1223,6 +1289,7 @@ qeth_get_unitaddr(struct qeth_card *card)
 	card->info.cula = prcd[63];
 	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
 			       (prcd[0x11] == _ascebc['M']));
+	kfree(prcd);
 	return 0;
 }
 
diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h
index 0477c47471c5..d74bc43da72a 100644
--- a/drivers/s390/net/qeth_mpc.h
+++ b/drivers/s390/net/qeth_mpc.h
@@ -37,6 +37,7 @@ extern unsigned char IPA_PDU_HEADER[];
 
 #define QETH_CLEAR_CHANNEL_PARM	-10
 #define QETH_HALT_CHANNEL_PARM	-11
+#define QETH_RCD_PARM -12
 
 /*****************************************************************************/
 /* IP Assist related definitions */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 421da1e7c0ea..c1f2d4b14c2b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -186,7 +186,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout)
 {
 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
 	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
-	fsf_req->timer.expires = timeout;
+	fsf_req->timer.expires = jiffies + timeout;
 	add_timer(&fsf_req->timer);
 }
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ef16f7ca4bb1..4c0a59afd5c8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -299,9 +299,10 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
 	}
 
 	/* log additional information provided by FSF (if any) */
-	if (unlikely(qtcb->header.log_length)) {
+	if (likely(qtcb->header.log_length)) {
 		/* do not trust them ;-) */
-		if (qtcb->header.log_start > sizeof(struct fsf_qtcb)) {
+		if (unlikely(qtcb->header.log_start >
+			     sizeof(struct fsf_qtcb))) {
 			ZFCP_LOG_NORMAL
 			    ("bug: ULP (FSF logging) log data starts "
 			     "beyond end of packet header. Ignored. "
@@ -310,8 +311,9 @@ zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *fsf_req)
 			     sizeof(struct fsf_qtcb));
 			goto forget_log;
 		}
-		if ((size_t) (qtcb->header.log_start + qtcb->header.log_length)
-		    > sizeof(struct fsf_qtcb)) {
+		if (unlikely((size_t) (qtcb->header.log_start +
+			     qtcb->header.log_length) >
+			     sizeof(struct fsf_qtcb))) {
 			ZFCP_LOG_NORMAL("bug: ULP (FSF logging) log data ends "
 					"beyond end of packet header. Ignored. "
 					"(start=%i, length=%i, size=%li)\n",