Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/Makefile          |    4
-rw-r--r--  drivers/s390/cio/airq.c            |   45
-rw-r--r--  drivers/s390/cio/chp.c             |  116
-rw-r--r--  drivers/s390/cio/chp.h             |   15
-rw-r--r--  drivers/s390/cio/chsc.c            |  379
-rw-r--r--  drivers/s390/cio/chsc.h            |   26
-rw-r--r--  drivers/s390/cio/chsc_sch.c        |  820
-rw-r--r--  drivers/s390/cio/chsc_sch.h        |   13
-rw-r--r--  drivers/s390/cio/cio.c             |  282
-rw-r--r--  drivers/s390/cio/cio.h             |   14
-rw-r--r--  drivers/s390/cio/cmf.c             |   20
-rw-r--r--  drivers/s390/cio/css.c             |  283
-rw-r--r--  drivers/s390/cio/css.h             |   49
-rw-r--r--  drivers/s390/cio/device.c          |  476
-rw-r--r--  drivers/s390/cio/device.h          |    7
-rw-r--r--  drivers/s390/cio/device_fsm.c      |  210
-rw-r--r--  drivers/s390/cio/device_id.c       |   16
-rw-r--r--  drivers/s390/cio/device_ops.c      |  134
-rw-r--r--  drivers/s390/cio/device_pgid.c     |   26
-rw-r--r--  drivers/s390/cio/device_status.c   |  133
-rw-r--r--  drivers/s390/cio/fcx.c             |  350
-rw-r--r--  drivers/s390/cio/idset.h           |    2
-rw-r--r--  drivers/s390/cio/io_sch.h          |   48
-rw-r--r--  drivers/s390/cio/ioasm.h           |    2
-rw-r--r--  drivers/s390/cio/isc.c             |   68
-rw-r--r--  drivers/s390/cio/itcw.c            |  327
-rw-r--r--  drivers/s390/cio/qdio.c            |   35
-rw-r--r--  drivers/s390/cio/qdio.h            |    6
-rw-r--r--  drivers/s390/cio/schid.h           |   26
-rw-r--r--  drivers/s390/cio/scsw.c            |  843
30 files changed, 3754 insertions, 1021 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index cfaf77b320f5..91e9e3f3073a 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -2,9 +2,11 @@
 # Makefile for the S/390 common i/o drivers
 #
 
-obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o
+obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o scsw.o \
+	fcx.o itcw.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
+obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
 obj-$(CONFIG_CCWGROUP) += ccwgroup.o
 obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index b7a07a866291..fe6cea15bbaf 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 
 #include <asm/airq.h>
+#include <asm/isc.h>
 
 #include "cio.h"
 #include "cio_debug.h"
@@ -33,15 +34,15 @@ struct airq_t {
 	void *drv_data;
 };
 
-static union indicator_t indicators;
-static struct airq_t *airqs[NR_AIRQS];
+static union indicator_t indicators[MAX_ISC];
+static struct airq_t *airqs[MAX_ISC][NR_AIRQS];
 
-static int register_airq(struct airq_t *airq)
+static int register_airq(struct airq_t *airq, u8 isc)
 {
 	int i;
 
 	for (i = 0; i < NR_AIRQS; i++)
-		if (!cmpxchg(&airqs[i], NULL, airq))
+		if (!cmpxchg(&airqs[isc][i], NULL, airq))
 			return i;
 	return -ENOMEM;
 }
@@ -50,18 +51,21 @@ static int register_airq(struct airq_t *airq)
  * s390_register_adapter_interrupt() - register adapter interrupt handler
  * @handler: adapter handler to be registered
  * @drv_data: driver data passed with each call to the handler
+ * @isc: isc for which the handler should be called
  *
  * Returns:
  * Pointer to the indicator to be used on success
  * ERR_PTR() if registration failed
  */
 void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
-				      void *drv_data)
+				      void *drv_data, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int ret;
 
+	if (isc > MAX_ISC)
+		return ERR_PTR(-EINVAL);
 	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
 	if (!airq) {
 		ret = -ENOMEM;
@@ -69,34 +73,35 @@ void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
 	}
 	airq->handler = handler;
 	airq->drv_data = drv_data;
-	ret = register_airq(airq);
-	if (ret < 0)
-		kfree(airq);
+
+	ret = register_airq(airq, isc);
 out:
 	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(airq);
 		return ERR_PTR(ret);
-	else
-		return &indicators.byte[ret];
+	} else
+		return &indicators[isc].byte[ret];
 }
 EXPORT_SYMBOL(s390_register_adapter_interrupt);
 
 /**
  * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
  * @ind: indicator for which the handler is to be unregistered
+ * @isc: interruption subclass
  */
-void s390_unregister_adapter_interrupt(void *ind)
+void s390_unregister_adapter_interrupt(void *ind, u8 isc)
 {
 	struct airq_t *airq;
 	char dbf_txt[16];
 	int i;
 
-	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators[isc].byte[0]);
 	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
 	CIO_TRACE_EVENT(4, dbf_txt);
-	indicators.byte[i] = 0;
-	airq = xchg(&airqs[i], NULL);
+	indicators[isc].byte[i] = 0;
+	airq = xchg(&airqs[isc][i], NULL);
 	/*
 	 * Allow interrupts to complete. This will ensure that the airq handle
 	 * is no longer referenced by any interrupt handler.
@@ -108,7 +113,7 @@ EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
 
 #define INDICATOR_MASK	(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
 
-void do_adapter_IO(void)
+void do_adapter_IO(u8 isc)
 {
 	int w;
 	int i;
@@ -120,22 +125,22 @@ void do_adapter_IO(void)
 	 * fetch operations.
 	 */
 	for (w = 0; w < NR_AIRQ_WORDS; w++) {
-		word = indicators.word[w];
+		word = indicators[isc].word[w];
 		i = w * NR_AIRQS_PER_WORD;
 		/*
 		 * Check bytes within word for active indicators.
 		 */
 		while (word) {
 			if (word & INDICATOR_MASK) {
-				airq = airqs[i];
+				airq = airqs[isc][i];
 				if (likely(airq))
-					airq->handler(&indicators.byte[i],
+					airq->handler(&indicators[isc].byte[i],
 						      airq->drv_data);
 				else
 					/*
 					 * Reset ill-behaved indicator.
 					 */
-					indicators.byte[i] = 0;
+					indicators[isc].byte[i] = 0;
 			}
 			word <<= 8;
 			i++;
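
With the reworked interface above, registration, the returned indicator and unregistration are all per interruption subclass. A minimal usage sketch, not part of this patch: my_handler and MY_ISC are illustrative names, and the handler signature is assumed from the call site in do_adapter_IO().

	/* Illustrative only: a hypothetical adapter-interrupt user. */
	static void *my_indicator;

	static void my_handler(void *ind, void *drv_data)
	{
		/* *ind was found non-zero; process the adapter event */
	}

	static int my_setup(void)
	{
		my_indicator = s390_register_adapter_interrupt(my_handler,
							       NULL, MY_ISC);
		return IS_ERR(my_indicator) ? PTR_ERR(my_indicator) : 0;
	}

	static void my_cleanup(void)
	{
		s390_unregister_adapter_interrupt(my_indicator, MY_ISC);
	}
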
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 297cdceb0ca4..db00b0591733 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -18,6 +18,7 @@
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 
+#include "../s390mach.h"
 #include "cio.h"
 #include "css.h"
 #include "ioasm.h"
@@ -94,6 +95,7 @@ u8 chp_get_sch_opm(struct subchannel *sch)
 	}
 	return opm;
 }
+EXPORT_SYMBOL_GPL(chp_get_sch_opm);
 
 /**
  * chp_is_registered - check if a channel-path is registered
@@ -121,11 +123,8 @@ static int s390_vary_chpid(struct chp_id chpid, int on)
 	CIO_TRACE_EVENT(2, dbf_text);
 
 	status = chp_get_status(chpid);
-	if (!on && !status) {
-		printk(KERN_ERR "cio: chpid %x.%02x is already offline\n",
-		       chpid.cssid, chpid.id);
-		return -EINVAL;
-	}
+	if (!on && !status)
+		return 0;
 
 	set_chp_logically_online(chpid, on);
 	chsc_chp_vary(chpid, on);
@@ -141,21 +140,14 @@ static ssize_t chp_measurement_chars_read(struct kobject *kobj,
 {
 	struct channel_path *chp;
 	struct device *device;
-	unsigned int size;
 
 	device = container_of(kobj, struct device, kobj);
 	chp = to_channelpath(device);
 	if (!chp->cmg_chars)
 		return 0;
 
-	size = sizeof(struct cmg_chars);
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, chp->cmg_chars + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off,
+				chp->cmg_chars, sizeof(struct cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -405,7 +397,7 @@ int chp_new(struct chp_id chpid)
 		 chpid.id);
 
 	/* Obtain channel path description and fill it in. */
-	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
+	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 	if (ret)
 		goto out_free;
 	if ((chp->desc.flags & 0x80) == 0) {
@@ -413,8 +405,7 @@ int chp_new(struct chp_id chpid)
 		goto out_free;
 	}
 	/* Get channel-measurement characteristics. */
-	if (css_characteristics_avail && css_chsc_characteristics.scmc
-	    && css_chsc_characteristics.secm) {
+	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
 		ret = chsc_get_channel_measurement_chars(chp);
 		if (ret)
 			goto out_free;
@@ -476,26 +467,74 @@ void *chp_get_chp_desc(struct chp_id chpid)
 
 /**
  * chp_process_crw - process channel-path status change
- * @id: channel-path ID number
- * @status: non-zero if channel-path has become available, zero otherwise
+ * @crw0: channel report-word to handler
+ * @crw1: second channel-report word (always NULL)
+ * @overflow: crw overflow indication
  *
  * Handle channel-report-words indicating that the status of a channel-path
  * has changed.
  */
-void chp_process_crw(int id, int status)
+static void chp_process_crw(struct crw *crw0, struct crw *crw1,
+			    int overflow)
 {
 	struct chp_id chpid;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
+	/*
+	 * Check for solicited machine checks. These are
+	 * created by reset channel path and need not be
+	 * handled here.
+	 */
+	if (crw0->slct) {
+		CIO_CRW_EVENT(2, "solicited machine check for "
+			      "channel path %02X\n", crw0->rsid);
+		return;
+	}
 	chp_id_init(&chpid);
-	chpid.id = id;
-	if (status) {
+	chpid.id = crw0->rsid;
+	switch (crw0->erc) {
+	case CRW_ERC_IPARM: /* Path has come. */
 		if (!chp_is_registered(chpid))
 			chp_new(chpid);
 		chsc_chp_online(chpid);
-	} else
+		break;
+	case CRW_ERC_PERRI: /* Path has gone. */
+	case CRW_ERC_PERRN:
 		chsc_chp_offline(chpid);
+		break;
+	default:
+		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
+			      crw0->erc);
+	}
 }
 
+int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & link->fla_mask) != link->fla))
+			continue;
+		return mask;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
+
 static inline int info_bit_num(struct chp_id id)
 {
 	return id.id + id.cssid * (__MAX_CHPID + 1);
@@ -575,6 +614,7 @@ static void cfg_func(struct work_struct *work)
 {
 	struct chp_id chpid;
 	enum cfg_task_t t;
+	int rc;
 
 	mutex_lock(&cfg_lock);
 	t = cfg_none;
@@ -589,14 +629,24 @@ static void cfg_func(struct work_struct *work)
 
 	switch (t) {
 	case cfg_configure:
-		sclp_chp_configure(chpid);
-		info_expire();
-		chsc_chp_online(chpid);
+		rc = sclp_chp_configure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_online(chpid);
+		}
 		break;
 	case cfg_deconfigure:
-		sclp_chp_deconfigure(chpid);
-		info_expire();
-		chsc_chp_offline(chpid);
+		rc = sclp_chp_deconfigure(chpid);
+		if (rc)
+			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
+				      "%d\n", chpid.cssid, chpid.id, rc);
+		else {
+			info_expire();
+			chsc_chp_offline(chpid);
+		}
 		break;
 	case cfg_none:
 		/* Get updated information after last change. */
@@ -654,10 +704,16 @@ static int cfg_wait_idle(void)
 static int __init chp_init(void)
 {
 	struct chp_id chpid;
+	int ret;
 
+	ret = s390_register_crw_handler(CRW_RSC_CPATH, chp_process_crw);
+	if (ret)
+		return ret;
 	chp_wq = create_singlethread_workqueue("cio_chp");
-	if (!chp_wq)
+	if (!chp_wq) {
+		s390_unregister_crw_handler(CRW_RSC_CPATH);
 		return -ENOMEM;
+	}
 	INIT_WORK(&cfg_work, cfg_func);
 	init_waitqueue_head(&cfg_wait_queue);
 	if (info_update())
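
The changes above route all channel-path events through the subchannel driver's new chp_event() callback, with struct chp_link carrying the affected chpid plus an optional link address, and chp_ssd_get_mask() translating that into a path mask. A sketch of how a driver callback might use it; the callback name and the opm/lpm handling shown are illustrative and not taken from this patch:

	/* Illustrative only: a hypothetical chp_event implementation. */
	static int my_chp_event(struct subchannel *sch, struct chp_link *link,
				int event)
	{
		int mask = chp_ssd_get_mask(&sch->ssd_info, link);

		if (!mask)
			return 0;	/* event does not concern this subchannel */
		switch (event) {
		case CHP_OFFLINE:
		case CHP_VARY_OFF:
			sch->opm &= ~mask;
			sch->lpm &= ~mask;
			break;
		case CHP_ONLINE:
		case CHP_VARY_ON:
			sch->opm |= mask;
			sch->lpm |= mask;
			break;
		}
		return 0;
	}
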
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 65286563c592..26c3d2246176 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -12,12 +12,24 @@
 #include <linux/device.h>
 #include <asm/chpid.h>
 #include "chsc.h"
+#include "css.h"
 
 #define CHP_STATUS_STANDBY		0
 #define CHP_STATUS_CONFIGURED		1
 #define CHP_STATUS_RESERVED		2
 #define CHP_STATUS_NOT_RECOGNIZED	3
 
+#define CHP_ONLINE 0
+#define CHP_OFFLINE 1
+#define CHP_VARY_ON 2
+#define CHP_VARY_OFF 3
+
+struct chp_link {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
 static inline int chp_test_bit(u8 *bitmap, int num)
 {
 	int byte = num >> 3;
@@ -42,12 +54,11 @@ int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
 void *chp_get_chp_desc(struct chp_id chpid);
-void chp_process_crw(int id, int available);
 void chp_remove_cmg_attr(struct channel_path *chp);
 int chp_add_cmg_attr(struct channel_path *chp);
 int chp_new(struct chp_id chpid);
 void chp_cfg_schedule(struct chp_id chpid, int configure);
 void chp_cfg_cancel_deconfigure(struct chp_id chpid);
 int chp_info_get_status(struct chp_id chpid);
-
+int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
 #endif /* S390_CHP_H */
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 5de86908b0d0..65264a38057d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,8 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- *			      IBM Corporation
+ *    Copyright IBM Corp. 1999,2008
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -16,7 +15,9 @@
 
 #include <asm/cio.h>
 #include <asm/chpid.h>
+#include <asm/chsc.h>
 
+#include "../s390mach.h"
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
@@ -127,77 +128,12 @@ out_free:
 	return ret;
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0)
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(sch);
-}
-
 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
 {
-	int j;
-	int mask;
-	struct chp_id *chpid = data;
-	struct schib schib;
-
-	for (j = 0; j < 8; j++) {
-		mask = 0x80 >> j;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[j] == chpid->id))
-			break;
-	}
-	if (j >= 8)
-		return 0;
-
 	spin_lock_irq(sch->lock);
-
-	stsch(sch->schid, &schib);
-	if (!css_sch_is_valid(&schib))
-		goto out_unreg;
-	memcpy(&sch->schib, &schib, sizeof(struct schib));
-	/* Check for single path devices. */
-	if (sch->schib.pmcw.pim == 0x80)
-		goto out_unreg;
-
-	if (check_for_io_on_path(sch, mask)) {
-		if (device_is_online(sch))
-			device_kill_io(sch);
-		else {
-			terminate_internal_io(sch);
-			/* Re-start path verification. */
-			if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-		}
-	} else {
-		/* trigger path verification. */
-		if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		else if (sch->lpm == mask)
+	if (sch->driver && sch->driver->chp_event)
+		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
 			goto out_unreg;
-	}
-
 	spin_unlock_irq(sch->lock);
 	return 0;
 
@@ -211,15 +147,18 @@ out_unreg:
 void chsc_chp_offline(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) <= 0)
 		return;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
-	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
+	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
 static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
@@ -242,67 +181,25 @@ static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-struct res_acc_data {
-	struct chp_id chpid;
-	u32 fla_mask;
-	u16 fla;
-};
-
-static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
-			      struct res_acc_data *data)
-{
-	int i;
-	int mask;
-
-	for (i = 0; i < 8; i++) {
-		mask = 0x80 >> i;
-		if (!(ssd->path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
-			continue;
-		if ((ssd->fla_valid_mask & mask) &&
-		    ((ssd->fla[i] & data->fla_mask) != data->fla))
-			continue;
-		return mask;
-	}
-	return 0;
-}
-
 static int __s390_process_res_acc(struct subchannel *sch, void *data)
 {
-	int chp_mask, old_lpm;
-	struct res_acc_data *res_data = data;
-
 	spin_lock_irq(sch->lock);
-	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
-	if (chp_mask == 0)
-		goto out;
-	if (stsch(sch->schid, &sch->schib))
-		goto out;
-	old_lpm = sch->lpm;
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | chp_mask) & sch->opm;
-	if (!old_lpm && sch->lpm)
-		device_trigger_reprobe(sch);
-	else if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-out:
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, data, CHP_ONLINE);
 	spin_unlock_irq(sch->lock);
 
 	return 0;
 }
 
-static void s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc(struct chp_link *link)
 {
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
-		res_data->chpid.id);
+	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
+		link->chpid.id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
-	if (res_data->fla != 0) {
-		sprintf(dbf_txt, "fla%x", res_data->fla);
+	if (link->fla != 0) {
+		sprintf(dbf_txt, "fla%x", link->fla);
 		CIO_TRACE_EVENT( 2, dbf_txt);
 	}
 	/* Wait until previous actions have settled. */
@@ -315,7 +212,7 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
 	 * will we have to do.
 	 */
 	for_each_subchannel_staged(__s390_process_res_acc,
-				   s390_process_res_acc_new_sch, res_data);
+				   s390_process_res_acc_new_sch, link);
 }
 
 static int
@@ -388,7 +285,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 
 static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
-	struct res_acc_data res_data;
+	struct chp_link link;
 	struct chp_id chpid;
 	int status;
 
@@ -404,18 +301,18 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 		chp_new(chpid);
 	else if (!status)
 		return;
-	memset(&res_data, 0, sizeof(struct res_acc_data));
-	res_data.chpid = chpid;
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
-		res_data.fla = sei_area->fla;
+		link.fla = sei_area->fla;
 		if ((sei_area->vf & 0xc0) == 0xc0)
 			/* full link address */
-			res_data.fla_mask = 0xffff;
+			link.fla_mask = 0xffff;
 		else
 			/* link address */
-			res_data.fla_mask = 0xff00;
+			link.fla_mask = 0xff00;
 	}
-	s390_process_res_acc(&res_data);
+	s390_process_res_acc(&link);
 }
 
 struct chp_config_data {
@@ -480,17 +377,25 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
 	}
 }
 
-void chsc_process_crw(void)
+static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct chsc_sei_area *sei_area;
 
+	if (overflow) {
+		css_schedule_eval_all();
+		return;
+	}
+	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
+		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
+		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
+		      crw0->erc, crw0->rsid);
 	if (!sei_page)
 		return;
 	/* Access to sei_page is serialized through machine check handler
 	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
-	CIO_TRACE_EVENT( 2, "prcss");
+	CIO_TRACE_EVENT(2, "prcss");
 	do {
 		memset(sei_area, 0, sizeof(*sei_area));
 		sei_area->request.length = 0x0010;
@@ -509,114 +414,36 @@ void chsc_process_crw(void)
 	} while (sei_area->flags & 0x80);
 }
 
-static int __chp_add_new_sch(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-
-	if (stsch_err(schid, &schib))
-		/* We're through */
-		return -ENXIO;
-
-	/* Put it on the slow path. */
-	css_schedule_eval(schid);
-	return 0;
-}
-
-
-static int __chp_add(struct subchannel *sch, void *data)
-{
-	int i, mask;
-	struct chp_id *chpid = data;
-
-	spin_lock_irq(sch->lock);
-	for (i=0; i<8; i++) {
-		mask = 0x80 >> i;
-		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chpid->id))
-			break;
-	}
-	if (i==8) {
-		spin_unlock_irq(sch->lock);
-		return 0;
-	}
-	if (stsch(sch->schid, &sch->schib)) {
-		spin_unlock_irq(sch->lock);
-		css_schedule_eval(sch->schid);
-		return 0;
-	}
-	sch->lpm = ((sch->schib.pmcw.pim &
-		     sch->schib.pmcw.pam &
-		     sch->schib.pmcw.pom)
-		    | mask) & sch->opm;
-
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(sch);
-
-	spin_unlock_irq(sch->lock);
-
-	return 0;
-}
-
 void chsc_chp_online(struct chp_id chpid)
 {
 	char dbf_txt[15];
+	struct chp_link link;
 
 	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
 	if (chp_get_status(chpid) != 0) {
+		memset(&link, 0, sizeof(struct chp_link));
+		link.chpid = chpid;
 		/* Wait until previous actions have settled. */
 		css_wait_for_slow_path();
-		for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
-					   &chpid);
+		for_each_subchannel_staged(__s390_process_res_acc, NULL,
+					   &link);
 	}
 }
 
 static void __s390_subchannel_vary_chpid(struct subchannel *sch,
 					 struct chp_id chpid, int on)
 {
-	int chp, old_lpm;
-	int mask;
 	unsigned long flags;
+	struct chp_link link;
 
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	spin_lock_irqsave(sch->lock, flags);
-	old_lpm = sch->lpm;
-	for (chp = 0; chp < 8; chp++) {
-		mask = 0x80 >> chp;
-		if (!(sch->ssd_info.path_mask & mask))
-			continue;
-		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
-			continue;
-
-		if (on) {
-			sch->opm |= mask;
-			sch->lpm |= mask;
-			if (!old_lpm)
-				device_trigger_reprobe(sch);
-			else if (sch->driver && sch->driver->verify)
-				sch->driver->verify(sch);
-			break;
-		}
-		sch->opm &= ~mask;
-		sch->lpm &= ~mask;
-		if (check_for_io_on_path(sch, mask)) {
-			if (device_is_online(sch))
-				/* Path verification is done after killing. */
-				device_kill_io(sch);
-			else {
-				/* Kill and retry internal I/O. */
-				terminate_internal_io(sch);
-				/* Re-start path verification. */
-				if (sch->driver && sch->driver->verify)
-					sch->driver->verify(sch);
-			}
-		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0)
-				css_schedule_eval(sch->schid);
-		} else if (sch->driver && sch->driver->verify)
-			sch->driver->verify(sch);
-		break;
-	}
+	if (sch->driver && sch->driver->chp_event)
+		sch->driver->chp_event(sch, &link,
+				       on ? CHP_VARY_ON : CHP_VARY_OFF);
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
@@ -656,6 +483,10 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	struct chp_link link;
+
+	memset(&link, 0, sizeof(struct chp_link));
+	link.chpid = chpid;
 	/* Wait until previous actions have settled. */
 	css_wait_for_slow_path();
 	/*
@@ -664,10 +495,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 
 	if (on)
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-					   __s390_vary_chpid_on, &chpid);
+					   __s390_vary_chpid_on, &link);
 	else
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-					   NULL, &chpid);
+					   NULL, &link);
 
 	return 0;
 }
@@ -797,23 +628,33 @@ chsc_secm(struct channel_subsystem *css, int enable)
 	return ret;
 }
 
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc)
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp)
 {
 	int ccode, ret;
 
 	struct {
 		struct chsc_header request;
-		u32 : 24;
+		u32 : 2;
+		u32 m : 1;
+		u32 c : 1;
+		u32 fmt : 4;
+		u32 cssid : 8;
+		u32 : 4;
+		u32 rfmt : 4;
 		u32 first_chpid : 8;
 		u32 : 24;
 		u32 last_chpid : 8;
 		u32 zeroes1;
 		struct chsc_header response;
-		u32 zeroes2;
-		struct channel_path_desc desc;
+		u8 data[PAGE_SIZE - 20];
 	} __attribute__ ((packed)) *scpd_area;
 
+	if ((rfmt == 1) && !css_general_characteristics.fcs)
+		return -EINVAL;
+	if ((rfmt == 2) && !css_general_characteristics.cib)
+		return -EINVAL;
 	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!scpd_area)
 		return -ENOMEM;
@@ -821,8 +662,13 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
 
+	scpd_area->cssid = chpid.cssid;
 	scpd_area->first_chpid = chpid.id;
 	scpd_area->last_chpid = chpid.id;
+	scpd_area->m = m;
+	scpd_area->c = c;
+	scpd_area->fmt = fmt;
+	scpd_area->rfmt = rfmt;
 
 	ccode = chsc(scpd_area);
 	if (ccode > 0) {
@@ -833,8 +679,7 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
 	ret = chsc_error_from_response(scpd_area->response.code);
 	if (ret == 0)
 		/* Success. */
-		memcpy(desc, &scpd_area->desc,
-		       sizeof(struct channel_path_desc));
+		memcpy(resp, &scpd_area->response, scpd_area->response.length);
 	else
 		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 			      scpd_area->response.code);
@@ -842,6 +687,25 @@ out:
 	free_page((unsigned long)scpd_area);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
+
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	int ret;
+
+	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
+	if (!chsc_resp)
+		return -ENOMEM;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+	if (ret)
+		goto out_free;
+	memcpy(desc, &chsc_resp->data, chsc_resp->length);
+out_free:
+	kfree(chsc_resp);
+	return ret;
+}
 
 static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
@@ -937,15 +801,23 @@ out:
 
 int __init chsc_alloc_sei_area(void)
 {
+	int ret;
+
 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sei_page)
+	if (!sei_page) {
 		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
 			      "chsc machine checks!\n");
-	return (sei_page ? 0 : -ENOMEM);
+		return -ENOMEM;
+	}
+	ret = s390_register_crw_handler(CRW_RSC_CSS, chsc_process_crw);
+	if (ret)
+		kfree(sei_page);
+	return ret;
 }
 
 void __init chsc_free_sei_area(void)
 {
+	s390_unregister_crw_handler(CRW_RSC_CSS);
 	kfree(sei_page);
 }
 
@@ -1043,3 +915,52 @@ exit:
 
 EXPORT_SYMBOL_GPL(css_general_characteristics);
 EXPORT_SYMBOL_GPL(css_chsc_characteristics);
+
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0;
+		unsigned int op : 8;
+		unsigned int rsvd1 : 8;
+		unsigned int ctrl : 16;
+		unsigned int rsvd2[5];
+		struct chsc_header response;
+		unsigned int rsvd3[7];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0020;
+	rr->request.code = 0x0033;
+	rr->op = op;
+	rr->ctrl = ctrl;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
+	return rc;
+}
+
+int chsc_sstpi(void *page, void *result, size_t size)
+{
+	struct {
+		struct chsc_header request;
+		unsigned int rsvd0[3];
+		struct chsc_header response;
+		char data[size];
+	} __attribute__ ((packed)) *rr;
+	int rc;
+
+	memset(page, 0, PAGE_SIZE);
+	rr = page;
+	rr->request.length = 0x0010;
+	rr->request.code = 0x0038;
+	rc = chsc(rr);
+	if (rc)
+		return -EIO;
+	memcpy(result, &rr->data, size);
+	return (rr->response.code == 0x0001) ? 0 : -EIO;
+}
+
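
chsc_determine_channel_path_desc() is now the generalized SCPD interface (format, response format and the c/m bits are selectable), while chsc_determine_base_channel_path_desc() wraps it for the common format-0 case used by chp_new() in chp.c. A sketch of a direct caller; the CHPID value is illustrative and not taken from this patch:

	/* Illustrative only: fetch the base description of a CHPID. */
	static int example_get_desc(void)
	{
		struct channel_path_desc desc;
		struct chp_id chpid;
		int rc;

		chp_id_init(&chpid);
		chpid.id = 0x40;	/* illustrative CHPID */
		rc = chsc_determine_base_channel_path_desc(chpid, &desc);
		if (rc)
			return rc;
		/* flags bit 0x80 set means a valid description came back */
		return (desc.flags & 0x80) ? 0 : -ENODEV;
	}
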
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1f5db1e69b9..fb6c4d6c45b4 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -4,7 +4,8 @@
 #include <linux/types.h>
 #include <linux/device.h>
 #include <asm/chpid.h>
-#include "schid.h"
+#include <asm/chsc.h>
+#include <asm/schid.h>
 
 #define CHSC_SDA_OC_MSS   0x2
 
@@ -36,14 +37,15 @@ struct channel_path_desc {
 
 struct channel_path;
 
-extern void chsc_process_crw(void);
-
 struct css_general_char {
-	u64 : 41;
+	u64 : 12;
+	u32 dynio : 1;	 /* bit 12 */
+	u32 : 28;
 	u32 aif : 1;	 /* bit 41 */
 	u32 : 3;
 	u32 mcss : 1;	 /* bit 45 */
-	u32 : 2;
+	u32 fcs : 1;	 /* bit 46 */
+	u32 : 1;
 	u32 ext_mb : 1;  /* bit 48 */
 	u32 : 7;
 	u32 aif_tdd : 1; /* bit 56 */
@@ -51,7 +53,11 @@ struct css_general_char {
 	u32 qebsm : 1;	 /* bit 58 */
 	u32 : 8;
 	u32 aif_osa : 1; /* bit 67 */
-	u32 : 28;
+	u32 : 14;
+	u32 cib : 1;	 /* bit 82 */
+	u32 : 5;
+	u32 fcx : 1;	 /* bit 88 */
+	u32 : 7;
 }__attribute__((packed));
 
 struct css_chsc_char {
@@ -78,7 +84,6 @@ struct chsc_ssd_info {
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
-extern int css_characteristics_avail;
 extern int chsc_alloc_sei_area(void);
 extern void chsc_free_sei_area(void);
 
@@ -87,8 +92,11 @@ struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
-int chsc_determine_channel_path_description(struct chp_id chpid,
-					    struct channel_path_desc *desc);
+int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
+				     int c, int m,
+				     struct chsc_response_struct *resp);
+int chsc_determine_base_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
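
The new fcs and cib bits in css_general_char gate which response formats chsc_determine_channel_path_desc() will accept, per the validation added in chsc.c. A sketch of requesting an rfmt=1 description; all parameter values here are illustrative:

	/* Illustrative only: rfmt=1 requires the fcs facility. */
	static int example_get_fmt1_desc(struct chp_id chpid)
	{
		struct chsc_response_struct *resp;
		int rc;

		if (!css_general_characteristics.fcs)
			return -EOPNOTSUPP;
		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
		if (!resp)
			return -ENOMEM;
		rc = chsc_determine_channel_path_desc(chpid, 0, 1, 0, 0, resp);
		kfree(resp);
		return rc;
	}
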
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
new file mode 100644
index 000000000000..91ca87aa9f97
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.c
@@ -0,0 +1,820 @@
+/*
+ * Driver for s390 chsc subchannels
+ *
+ * Copyright IBM Corp. 2008
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <asm/cio.h>
+#include <asm/chsc.h>
+#include <asm/isc.h>
+
+#include "cio.h"
+#include "cio_debug.h"
+#include "css.h"
+#include "chsc_sch.h"
+#include "ioasm.h"
+
+static debug_info_t *chsc_debug_msg_id;
+static debug_info_t *chsc_debug_log_id;
+
+#define CHSC_MSG(imp, args...) do {					\
+		debug_sprintf_event(chsc_debug_msg_id, imp , ##args);	\
+	} while (0)
+
+#define CHSC_LOG(imp, txt) do {					\
+		debug_text_event(chsc_debug_log_id, imp , txt);	\
+	} while (0)
+
+static void CHSC_LOG_HEX(int level, void *data, int length)
+{
+	while (length > 0) {
+		debug_event(chsc_debug_log_id, level, data, length);
+		length -= chsc_debug_log_id->buf_size;
+		data += chsc_debug_log_id->buf_size;
+	}
+}
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("driver for s390 chsc subchannels");
+MODULE_LICENSE("GPL");
+
+static void chsc_subchannel_irq(struct subchannel *sch)
+{
+	struct chsc_private *private = sch->private;
+	struct chsc_request *request = private->request;
+	struct irb *irb = (struct irb *)__LC_IRB;
+
+	CHSC_LOG(4, "irb");
+	CHSC_LOG_HEX(4, irb, sizeof(*irb));
+	/* Copy irb to provided request and set done. */
+	if (!request) {
+		CHSC_MSG(0, "Interrupt on sch 0.%x.%04x with no request\n",
+			 sch->schid.ssid, sch->schid.sch_no);
+		return;
+	}
+	private->request = NULL;
+	memcpy(&request->irb, irb, sizeof(*irb));
+	stsch(sch->schid, &sch->schib);
+	complete(&request->completion);
+	put_device(&sch->dev);
+}
+
+static int chsc_subchannel_probe(struct subchannel *sch)
+{
+	struct chsc_private *private;
+	int ret;
+
+	CHSC_MSG(6, "Detected chsc subchannel 0.%x.%04x\n",
+		 sch->schid.ssid, sch->schid.sch_no);
+	sch->isc = CHSC_SCH_ISC;
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+	if (ret) {
+		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
+			 sch->schid.ssid, sch->schid.sch_no, ret);
+		kfree(private);
+	} else {
+		sch->private = private;
+		if (sch->dev.uevent_suppress) {
+			sch->dev.uevent_suppress = 0;
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
+	}
+	return ret;
+}
+
+static int chsc_subchannel_remove(struct subchannel *sch)
+{
+	struct chsc_private *private;
+
+	cio_disable_subchannel(sch);
+	private = sch->private;
+	sch->private = NULL;
+	if (private->request) {
+		complete(&private->request->completion);
+		put_device(&sch->dev);
+	}
+	kfree(private);
+	return 0;
+}
+
+static void chsc_subchannel_shutdown(struct subchannel *sch)
+{
+	cio_disable_subchannel(sch);
+}
+
+static struct css_device_id chsc_subchannel_ids[] = {
+	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
+
+static struct css_driver chsc_subchannel_driver = {
+	.owner = THIS_MODULE,
+	.subchannel_type = chsc_subchannel_ids,
+	.irq = chsc_subchannel_irq,
+	.probe = chsc_subchannel_probe,
+	.remove = chsc_subchannel_remove,
+	.shutdown = chsc_subchannel_shutdown,
+	.name = "chsc_subchannel",
+};
+
+static int __init chsc_init_dbfs(void)
+{
+	chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
+					   16 * sizeof(long));
+	if (!chsc_debug_msg_id)
+		goto out;
+	debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
+	debug_set_level(chsc_debug_msg_id, 2);
+	chsc_debug_log_id = debug_register("chsc_log", 16, 1, 16);
+	if (!chsc_debug_log_id)
+		goto out;
+	debug_register_view(chsc_debug_log_id, &debug_hex_ascii_view);
+	debug_set_level(chsc_debug_log_id, 2);
+	return 0;
+out:
+	if (chsc_debug_msg_id)
+		debug_unregister(chsc_debug_msg_id);
+	return -ENOMEM;
+}
+
+static void chsc_remove_dbfs(void)
+{
+	debug_unregister(chsc_debug_log_id);
+	debug_unregister(chsc_debug_msg_id);
+}
+
+static int __init chsc_init_sch_driver(void)
+{
+	return css_driver_register(&chsc_subchannel_driver);
+}
+
+static void chsc_cleanup_sch_driver(void)
+{
+	css_driver_unregister(&chsc_subchannel_driver);
+}
+
+static DEFINE_SPINLOCK(chsc_lock);
+
+static int chsc_subchannel_match_next_free(struct device *dev, void *data)
+{
+	struct subchannel *sch = to_subchannel(dev);
+
+	return sch->schib.pmcw.ena && !scsw_fctl(&sch->schib.scsw);
+}
+
+static struct subchannel *chsc_get_next_subchannel(struct subchannel *sch)
+{
+	struct device *dev;
+
+	dev = driver_find_device(&chsc_subchannel_driver.drv,
+				 sch ? &sch->dev : NULL, NULL,
+				 chsc_subchannel_match_next_free);
+	return dev ? to_subchannel(dev) : NULL;
+}
+
+/**
+ * chsc_async() - try to start a chsc request asynchronously
+ * @chsc_area: request to be started
+ * @request: request structure to associate
+ *
+ * Tries to start a chsc request on one of the existing chsc subchannels.
+ * Returns:
+ *  %0 if the request was performed synchronously
+ *  %-EINPROGRESS if the request was successfully started
+ *  %-EBUSY if all chsc subchannels are busy
+ *  %-ENODEV if no chsc subchannels are available
+ * Context:
+ *  interrupts disabled, chsc_lock held
+ */
+static int chsc_async(struct chsc_async_area *chsc_area,
+		      struct chsc_request *request)
+{
+	int cc;
+	struct chsc_private *private;
+	struct subchannel *sch = NULL;
+	int ret = -ENODEV;
+	char dbf[10];
+
+	chsc_area->header.key = PAGE_DEFAULT_KEY;
+	while ((sch = chsc_get_next_subchannel(sch))) {
+		spin_lock(sch->lock);
+		private = sch->private;
+		if (private->request) {
+			spin_unlock(sch->lock);
+			ret = -EBUSY;
+			continue;
+		}
+		chsc_area->header.sid = sch->schid;
+		CHSC_LOG(2, "schid");
+		CHSC_LOG_HEX(2, &sch->schid, sizeof(sch->schid));
+		cc = chsc(chsc_area);
+		sprintf(dbf, "cc:%d", cc);
+		CHSC_LOG(2, dbf);
+		switch (cc) {
+		case 0:
+			ret = 0;
+			break;
+		case 1:
+			sch->schib.scsw.cmd.fctl |= SCSW_FCTL_START_FUNC;
+			ret = -EINPROGRESS;
+			private->request = request;
+			break;
+		case 2:
+			ret = -EBUSY;
+			break;
+		default:
+			ret = -ENODEV;
+		}
+		spin_unlock(sch->lock);
+		CHSC_MSG(2, "chsc on 0.%x.%04x returned cc=%d\n",
+			 sch->schid.ssid, sch->schid.sch_no, cc);
+		if (ret == -EINPROGRESS)
+			return -EINPROGRESS;
+		put_device(&sch->dev);
+		if (ret == 0)
+			return 0;
+	}
+	return ret;
+}
+
+static void chsc_log_command(struct chsc_async_area *chsc_area)
+{
+	char dbf[10];
+
+	sprintf(dbf, "CHSC:%x", chsc_area->header.code);
+	CHSC_LOG(0, dbf);
+	CHSC_LOG_HEX(0, chsc_area, 32);
+}
+
+static int chsc_examine_irb(struct chsc_request *request)
+{
+	int backed_up;
+
+	if (!(scsw_stctl(&request->irb.scsw) & SCSW_STCTL_STATUS_PEND))
+		return -EIO;
+	backed_up = scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHAIN_CHECK;
+	request->irb.scsw.cmd.cstat &= ~SCHN_STAT_CHAIN_CHECK;
+	if (scsw_cstat(&request->irb.scsw) == 0)
+		return 0;
+	if (!backed_up)
+		return 0;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROG_CHECK)
+		return -EIO;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_PROT_CHECK)
+		return -EPERM;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_DATA_CHK)
+		return -EAGAIN;
+	if (scsw_cstat(&request->irb.scsw) & SCHN_STAT_CHN_CTRL_CHK)
+		return -EAGAIN;
+	return -EIO;
+}
+
+static int chsc_ioctl_start(void __user *user_area)
+{
+	struct chsc_request *request;
+	struct chsc_async_area *chsc_area;
+	int ret;
+	char dbf[10];
+
+	if (!css_general_characteristics.dynio)
+		/* It makes no sense to try. */
+		return -EOPNOTSUPP;
+	chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+	if (!chsc_area)
+		return -ENOMEM;
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (!request) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	init_completion(&request->completion);
+	if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	chsc_log_command(chsc_area);
+	spin_lock_irq(&chsc_lock);
+	ret = chsc_async(chsc_area, request);
+	spin_unlock_irq(&chsc_lock);
+	if (ret == -EINPROGRESS) {
+		wait_for_completion(&request->completion);
+		ret = chsc_examine_irb(request);
+	}
+	/* copy area back to user */
+	if (!ret)
+		if (copy_to_user(user_area, chsc_area, PAGE_SIZE))
+			ret = -EFAULT;
+out_free:
+	sprintf(dbf, "ret:%d", ret);
+	CHSC_LOG(0, dbf);
+	kfree(request);
+	free_page((unsigned long)chsc_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_channel_path(void __user *user_cd)
+{
+	struct chsc_chp_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_chpid : 8;
+		u32 : 24;
+		u32 last_chpid : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scpcd_area;
+
+	scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpcd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scpcd_area->request.length = 0x0010;
+	scpcd_area->request.code = 0x0028;
+	scpcd_area->m = cd->m;
+	scpcd_area->fmt1 = cd->fmt;
+	scpcd_area->cssid = cd->chpid.cssid;
+	scpcd_area->first_chpid = cd->chpid.id;
+	scpcd_area->last_chpid = cd->chpid.id;
+
+	ccode = chsc(scpcd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scpcd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scpcd: response code=%x\n",
+			 scpcd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cpcb, &scpcd_area->response, scpcd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scpcd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_cu(void __user *user_cd)
+{
+	struct chsc_cu_cd *cd;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 8;
+		u32 first_cun : 8;
+		u32 : 24;
+		u32 last_cun : 8;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *scucd_area;
+
+	scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scucd_area)
+		return -ENOMEM;
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cd, user_cd, sizeof(*cd))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	scucd_area->request.length = 0x0010;
+	scucd_area->request.code = 0x0028;
+	scucd_area->m = cd->m;
+	scucd_area->fmt1 = cd->fmt;
+	scucd_area->cssid = cd->cssid;
+	scucd_area->first_cun = cd->cun;
+	scucd_area->last_cun = cd->cun;
+
+	ccode = chsc(scucd_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (scucd_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "scucd: response code=%x\n",
+			 scucd_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cd->cucb, &scucd_area->response, scucd_area->response.length);
+	if (copy_to_user(user_cd, cd, sizeof(*cd)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cd);
+	free_page((unsigned long)scucd_area);
+	return ret;
+}
+
+static int chsc_ioctl_info_sch_cu(void __user *user_cud)
+{
+	struct chsc_sch_cud *cud;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 5;
+		u32 fmt1 : 4;
+		u32 : 2;
+		u32 ssid : 2;
+		u32 first_sch : 16;
+		u32 : 8;
+		u32 cssid : 8;
+		u32 last_sch : 16;
+		u32 : 32;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sscud_area;
+
+	sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sscud_area)
+		return -ENOMEM;
+	cud = kzalloc(sizeof(*cud), GFP_KERNEL);
+	if (!cud) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(cud, user_cud, sizeof(*cud))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sscud_area->request.length = 0x0010;
+	sscud_area->request.code = 0x0006;
+	sscud_area->m = cud->schid.m;
+	sscud_area->fmt1 = cud->fmt;
+	sscud_area->ssid = cud->schid.ssid;
+	sscud_area->first_sch = cud->schid.sch_no;
+	sscud_area->cssid = cud->schid.cssid;
+	sscud_area->last_sch = cud->schid.sch_no;
+
+	ccode = chsc(sscud_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sscud_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sscud: response code=%x\n",
+			 sscud_area->response.code);
+		goto out_free;
+	}
+	memcpy(&cud->scub, &sscud_area->response, sscud_area->response.length);
+	if (copy_to_user(user_cud, cud, sizeof(*cud)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(cud);
+	free_page((unsigned long)sscud_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_info(void __user *user_ci)
+{
+	struct chsc_conf_info *ci;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 : 2;
+		u32 m : 1;
+		u32 : 1;
+		u32 fmt1 : 4;
+		u32 cssid : 8;
+		u32 : 6;
+		u32 ssid : 2;
+		u32 : 8;
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 20];
+	} __attribute__ ((packed)) *sci_area;
+
+	sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sci_area)
+		return -ENOMEM;
+	ci = kzalloc(sizeof(*ci), GFP_KERNEL);
+	if (!ci) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ci, user_ci, sizeof(*ci))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sci_area->request.length = 0x0010;
+	sci_area->request.code = 0x0012;
+	sci_area->m = ci->id.m;
+	sci_area->fmt1 = ci->fmt;
+	sci_area->cssid = ci->id.cssid;
+	sci_area->ssid = ci->id.ssid;
+
+	ccode = chsc(sci_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sci_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sci: response code=%x\n",
+			 sci_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ci->scid, &sci_area->response, sci_area->response.length);
+	if (copy_to_user(user_ci, ci, sizeof(*ci)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ci);
+	free_page((unsigned long)sci_area);
+	return ret;
+}
+
+static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
+{
+	struct chsc_comp_list *ccl;
+	int ret, ccode;
+	struct {
+		struct chsc_header request;
+		u32 ctype : 8;
+		u32 : 4;
+		u32 fmt : 4;
+		u32 : 16;
+		u64 : 64;
+		u32 list_parm[2];
+		u64 : 64;
+		struct chsc_header response;
+		u8 data[PAGE_SIZE - 36];
+	} __attribute__ ((packed)) *sccl_area;
+	struct {
+		u32 m : 1;
+		u32 : 31;
+		u32 cssid : 8;
+		u32 : 16;
+		u32 chpid : 8;
+	} __attribute__ ((packed)) *chpid_parm;
+	struct {
+		u32 f_cssid : 8;
+		u32 l_cssid : 8;
+		u32 : 16;
+		u32 res;
+	} __attribute__ ((packed)) *cssids_parm;
+
+	sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccl_area)
+		return -ENOMEM;
+	ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
+	if (!ccl) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	if (copy_from_user(ccl, user_ccl, sizeof(*ccl))) {
+		ret = -EFAULT;
+		goto out_free;
+	}
+	sccl_area->request.length = 0x0020;
+	sccl_area->request.code = 0x0030;
+	sccl_area->fmt = ccl->req.fmt;
+	sccl_area->ctype = ccl->req.ctype;
+	switch (sccl_area->ctype) {
+	case CCL_CU_ON_CHP:
+	case CCL_IOP_CHP:
+		chpid_parm = (void *)&sccl_area->list_parm;
+		chpid_parm->m = ccl->req.chpid.m;
+		chpid_parm->cssid = ccl->req.chpid.chp.cssid;
+		chpid_parm->chpid = ccl->req.chpid.chp.id;
+		break;
+	case CCL_CSS_IMG:
+	case CCL_CSS_IMG_CONF_CHAR:
+		cssids_parm = (void *)&sccl_area->list_parm;
+		cssids_parm->f_cssid = ccl->req.cssids.f_cssid;
+		cssids_parm->l_cssid = ccl->req.cssids.l_cssid;
+		break;
+	}
+	ccode = chsc(sccl_area);
+	if (ccode != 0) {
+		ret = -EIO;
+		goto out_free;
+	}
+	if (sccl_area->response.code != 0x0001) {
+		ret = -EIO;
+		CHSC_MSG(0, "sccl: response code=%x\n",
+			 sccl_area->response.code);
+		goto out_free;
+	}
+	memcpy(&ccl->sccl, &sccl_area->response, sccl_area->response.length);
+	if (copy_to_user(user_ccl, ccl, sizeof(*ccl)))
+		ret = -EFAULT;
+	else
+		ret = 0;
+out_free:
+	kfree(ccl);
+	free_page((unsigned long)sccl_area);
+	return ret;
+}
656
657static int chsc_ioctl_chpd(void __user *user_chpd)
658{
659 struct chsc_cpd_info *chpd;
660 int ret;
661
662 chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
663 if (!chpd)
664 return -ENOMEM;
665 if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
666 ret = -EFAULT;
667 goto out_free;
668 }
669 ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
670 chpd->rfmt, chpd->c, chpd->m,
671 &chpd->chpdb);
672 if (ret)
673 goto out_free;
674 if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
675 ret = -EFAULT;
676out_free:
677 kfree(chpd);
678 return ret;
679}
680
681static int chsc_ioctl_dcal(void __user *user_dcal)
682{
683 struct chsc_dcal *dcal;
684 int ret, ccode;
685 struct {
686 struct chsc_header request;
687 u32 atype : 8;
688 u32 : 4;
689 u32 fmt : 4;
690 u32 : 16;
691 u32 res0[2];
692 u32 list_parm[2];
693 u32 res1[2];
694 struct chsc_header response;
695 u8 data[PAGE_SIZE - 36];
696 } __attribute__ ((packed)) *sdcal_area;
697
698 sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
699 if (!sdcal_area)
700 return -ENOMEM;
701 dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
702 if (!dcal) {
703 ret = -ENOMEM;
704 goto out_free;
705 }
706 if (copy_from_user(dcal, user_dcal, sizeof(*dcal))) {
707 ret = -EFAULT;
708 goto out_free;
709 }
710 sdcal_area->request.length = 0x0020;
711 sdcal_area->request.code = 0x0034;
712 sdcal_area->atype = dcal->req.atype;
713 sdcal_area->fmt = dcal->req.fmt;
714 memcpy(&sdcal_area->list_parm, &dcal->req.list_parm,
715 sizeof(sdcal_area->list_parm));
716
717 ccode = chsc(sdcal_area);
718 if (ccode != 0) {
719 ret = -EIO;
720 goto out_free;
721 }
722 if (sdcal_area->response.code != 0x0001) {
723 ret = -EIO;
724 CHSC_MSG(0, "sdcal: response code=%x\n",
725 sdcal_area->response.code);
726 goto out_free;
727 }
728 memcpy(&dcal->sdcal, &sdcal_area->response,
729 sdcal_area->response.length);
730 if (copy_to_user(user_dcal, dcal, sizeof(*dcal)))
731 ret = -EFAULT;
732 else
733 ret = 0;
734out_free:
735 kfree(dcal);
736 free_page((unsigned long)sdcal_area);
737 return ret;
738}
739
740static long chsc_ioctl(struct file *filp, unsigned int cmd,
741 unsigned long arg)
742{
743 CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
744 switch (cmd) {
745 case CHSC_START:
746 return chsc_ioctl_start((void __user *)arg);
747 case CHSC_INFO_CHANNEL_PATH:
748 return chsc_ioctl_info_channel_path((void __user *)arg);
749 case CHSC_INFO_CU:
750 return chsc_ioctl_info_cu((void __user *)arg);
751 case CHSC_INFO_SCH_CU:
752 return chsc_ioctl_info_sch_cu((void __user *)arg);
753 case CHSC_INFO_CI:
754 return chsc_ioctl_conf_info((void __user *)arg);
755 case CHSC_INFO_CCL:
756 return chsc_ioctl_conf_comp_list((void __user *)arg);
757 case CHSC_INFO_CPD:
758 return chsc_ioctl_chpd((void __user *)arg);
759 case CHSC_INFO_DCAL:
760 return chsc_ioctl_dcal((void __user *)arg);
761 default: /* unknown ioctl number */
762 return -ENOIOCTLCMD;
763 }
764}
765
766static const struct file_operations chsc_fops = {
767 .owner = THIS_MODULE,
768 .unlocked_ioctl = chsc_ioctl,
769 .compat_ioctl = chsc_ioctl,
770};
771
772static struct miscdevice chsc_misc_device = {
773 .minor = MISC_DYNAMIC_MINOR,
774 .name = "chsc",
775 .fops = &chsc_fops,
776};
777
778static int __init chsc_misc_init(void)
779{
780 return misc_register(&chsc_misc_device);
781}
782
783static void chsc_misc_cleanup(void)
784{
785 misc_deregister(&chsc_misc_device);
786}
787
788static int __init chsc_sch_init(void)
789{
790 int ret;
791
792 ret = chsc_init_dbfs();
793 if (ret)
794 return ret;
795 isc_register(CHSC_SCH_ISC);
796 ret = chsc_init_sch_driver();
797 if (ret)
798 goto out_dbf;
799 ret = chsc_misc_init();
800 if (ret)
801 goto out_driver;
802 return ret;
803out_driver:
804 chsc_cleanup_sch_driver();
805out_dbf:
806 isc_unregister(CHSC_SCH_ISC);
807 chsc_remove_dbfs();
808 return ret;
809}
810
811static void __exit chsc_sch_exit(void)
812{
813 chsc_misc_cleanup();
814 chsc_cleanup_sch_driver();
815 isc_unregister(CHSC_SCH_ISC);
816 chsc_remove_dbfs();
817}
818
819module_init(chsc_sch_init);
820module_exit(chsc_sch_exit);
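
The misc device above gives user space a thin pass-through to the CHSC instruction. A minimal sketch of a caller follows, assuming the CHSC_INFO_CPD ioctl number and struct chsc_cpd_info (with the chpid, fmt and chpdb fields used by chsc_ioctl_chpd() above) are exported through <asm/chsc.h>:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/chsc.h>		/* assumed to export the ioctl interface */

int main(void)
{
	struct chsc_cpd_info cpd;
	int fd = open("/dev/chsc", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&cpd, 0, sizeof(cpd));
	cpd.chpid.id = 0x40;	/* hypothetical channel-path id */
	if (ioctl(fd, CHSC_INFO_CPD, &cpd) == 0)
		printf("cpd response code %x\n", cpd.chpdb.code);
	close(fd);
	return 0;
}

Note that each handler copies the full structure in both directions, so callers should zero any fields they do not set explicitly.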
diff --git a/drivers/s390/cio/chsc_sch.h b/drivers/s390/cio/chsc_sch.h
new file mode 100644
index 000000000000..589ebfad6aad
--- /dev/null
+++ b/drivers/s390/cio/chsc_sch.h
@@ -0,0 +1,13 @@
1#ifndef _CHSC_SCH_H
2#define _CHSC_SCH_H
3
4struct chsc_request {
5 struct completion completion;
6 struct irb irb;
7};
8
9struct chsc_private {
10 struct chsc_request *request;
11};
12
13#endif
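
The pairing of a completion with space for the interrupt response block suggests the synchronous request flow sketched below; the start helper and the driver-data lookup are assumptions for illustration, not the actual chsc_sch.c code:

static int chsc_do_request(struct subchannel *sch, struct chsc_request *request)
{
	/* Assumed: the chsc_private is stored as driver data at probe time. */
	struct chsc_private *private = dev_get_drvdata(&sch->dev);
	int ret;

	init_completion(&request->completion);
	spin_lock_irq(sch->lock);
	private->request = request;
	ret = chsc_start(sch);		/* hypothetical start helper */
	spin_unlock_irq(sch->lock);
	if (!ret)
		/* The irq handler stores the irb and completes the request. */
		wait_for_completion(&request->completion);
	return ret;
}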
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b32d7eb3d81a..33bff8fec7d1 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -2,7 +2,7 @@
2 * drivers/s390/cio/cio.c 2 * drivers/s390/cio/cio.c
3 * S/390 common I/O routines -- low level i/o calls 3 * S/390 common I/O routines -- low level i/o calls
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright IBM Corp. 1999,2008
6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 6 * Author(s): Ingo Adlung (adlung@de.ibm.com)
7 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Arnd Bergmann (arndb@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com)
@@ -24,7 +24,9 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include <asm/airq.h> 26#include <asm/airq.h>
27#include <asm/isc.h>
27#include <asm/cpu.h> 28#include <asm/cpu.h>
29#include <asm/fcx.h>
28#include "cio.h" 30#include "cio.h"
29#include "css.h" 31#include "css.h"
30#include "chsc.h" 32#include "chsc.h"
@@ -72,7 +74,6 @@ out_unregister:
72 debug_unregister(cio_debug_trace_id); 74 debug_unregister(cio_debug_trace_id);
73 if (cio_debug_crw_id) 75 if (cio_debug_crw_id)
74 debug_unregister(cio_debug_crw_id); 76 debug_unregister(cio_debug_crw_id);
75 printk(KERN_WARNING"cio: could not initialize debugging\n");
76 return -1; 77 return -1;
77} 78}
78 79
@@ -128,7 +129,7 @@ cio_tpi(void)
128 local_bh_disable(); 129 local_bh_disable();
129 irq_enter (); 130 irq_enter ();
130 spin_lock(sch->lock); 131 spin_lock(sch->lock);
131 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 132 memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
132 if (sch->driver && sch->driver->irq) 133 if (sch->driver && sch->driver->irq)
133 sch->driver->irq(sch); 134 sch->driver->irq(sch);
134 spin_unlock(sch->lock); 135 spin_unlock(sch->lock);
@@ -167,30 +168,30 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
167{ 168{
168 char dbf_txt[15]; 169 char dbf_txt[15];
169 int ccode; 170 int ccode;
170 struct orb *orb; 171 union orb *orb;
171 172
172 CIO_TRACE_EVENT(4, "stIO"); 173 CIO_TRACE_EVENT(4, "stIO");
173 CIO_TRACE_EVENT(4, sch->dev.bus_id); 174 CIO_TRACE_EVENT(4, sch->dev.bus_id);
174 175
175 orb = &to_io_private(sch)->orb; 176 orb = &to_io_private(sch)->orb;
176 /* sch is always under 2G. */ 177 /* sch is always under 2G. */
177 orb->intparm = (u32)(addr_t)sch; 178 orb->cmd.intparm = (u32)(addr_t)sch;
178 orb->fmt = 1; 179 orb->cmd.fmt = 1;
179 180
180 orb->pfch = sch->options.prefetch == 0; 181 orb->cmd.pfch = sch->options.prefetch == 0;
181 orb->spnd = sch->options.suspend; 182 orb->cmd.spnd = sch->options.suspend;
182 orb->ssic = sch->options.suspend && sch->options.inter; 183 orb->cmd.ssic = sch->options.suspend && sch->options.inter;
183 orb->lpm = (lpm != 0) ? lpm : sch->lpm; 184 orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
184#ifdef CONFIG_64BIT 185#ifdef CONFIG_64BIT
185 /* 186 /*
186 * for 64 bit we always support 64 bit IDAWs with 4k page size only 187 * for 64 bit we always support 64 bit IDAWs with 4k page size only
187 */ 188 */
188 orb->c64 = 1; 189 orb->cmd.c64 = 1;
189 orb->i2k = 0; 190 orb->cmd.i2k = 0;
190#endif 191#endif
191 orb->key = key >> 4; 192 orb->cmd.key = key >> 4;
192 /* issue "Start Subchannel" */ 193 /* issue "Start Subchannel" */
193 orb->cpa = (__u32) __pa(cpa); 194 orb->cmd.cpa = (__u32) __pa(cpa);
194 ccode = ssch(sch->schid, orb); 195 ccode = ssch(sch->schid, orb);
195 196
196 /* process condition code */ 197 /* process condition code */
@@ -202,7 +203,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
202 /* 203 /*
203 * initialize device status information 204 * initialize device status information
204 */ 205 */
205 sch->schib.scsw.actl |= SCSW_ACTL_START_PEND; 206 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
206 return 0; 207 return 0;
207 case 1: /* status pending */ 208 case 1: /* status pending */
208 case 2: /* busy */ 209 case 2: /* busy */
@@ -237,7 +238,7 @@ cio_resume (struct subchannel *sch)
237 238
238 switch (ccode) { 239 switch (ccode) {
239 case 0: 240 case 0:
240 sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND; 241 sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
241 return 0; 242 return 0;
242 case 1: 243 case 1:
243 return -EBUSY; 244 return -EBUSY;
@@ -277,7 +278,7 @@ cio_halt(struct subchannel *sch)
277 278
278 switch (ccode) { 279 switch (ccode) {
279 case 0: 280 case 0:
280 sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; 281 sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
281 return 0; 282 return 0;
282 case 1: /* status pending */ 283 case 1: /* status pending */
283 case 2: /* busy */ 284 case 2: /* busy */
@@ -312,7 +313,7 @@ cio_clear(struct subchannel *sch)
312 313
313 switch (ccode) { 314 switch (ccode) {
314 case 0: 315 case 0:
315 sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND; 316 sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
316 return 0; 317 return 0;
317 default: /* device not operational */ 318 default: /* device not operational */
318 return -ENODEV; 319 return -ENODEV;
@@ -387,8 +388,10 @@ cio_modify (struct subchannel *sch)
387 return ret; 388 return ret;
388} 389}
389 390
390/* 391/**
391 * Enable subchannel. 392 * cio_enable_subchannel - enable a subchannel.
393 * @sch: subchannel to be enabled
394 * @intparm: interruption parameter to set
392 */ 395 */
393int cio_enable_subchannel(struct subchannel *sch, u32 intparm) 396int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
394{ 397{
@@ -434,12 +437,13 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
434 CIO_TRACE_EVENT (2, dbf_txt); 437 CIO_TRACE_EVENT (2, dbf_txt);
435 return ret; 438 return ret;
436} 439}
440EXPORT_SYMBOL_GPL(cio_enable_subchannel);
437 441
438/* 442/**
439 * Disable subchannel. 443 * cio_disable_subchannel - disable a subchannel.
444 * @sch: subchannel to disable
440 */ 445 */
441int 446int cio_disable_subchannel(struct subchannel *sch)
442cio_disable_subchannel (struct subchannel *sch)
443{ 447{
444 char dbf_txt[15]; 448 char dbf_txt[15];
445 int ccode; 449 int ccode;
@@ -455,7 +459,7 @@ cio_disable_subchannel (struct subchannel *sch)
455 if (ccode == 3) /* Not operational. */ 459 if (ccode == 3) /* Not operational. */
456 return -ENODEV; 460 return -ENODEV;
457 461
458 if (sch->schib.scsw.actl != 0) 462 if (scsw_actl(&sch->schib.scsw) != 0)
459 /* 463 /*
460 * the disable function must not be called while there are 464 * the disable function must not be called while there are
461 * requests pending for completion ! 465 * requests pending for completion !
@@ -484,6 +488,7 @@ cio_disable_subchannel (struct subchannel *sch)
484 CIO_TRACE_EVENT (2, dbf_txt); 488 CIO_TRACE_EVENT (2, dbf_txt);
485 return ret; 489 return ret;
486} 490}
491EXPORT_SYMBOL_GPL(cio_disable_subchannel);
487 492
488int cio_create_sch_lock(struct subchannel *sch) 493int cio_create_sch_lock(struct subchannel *sch)
489{ 494{
@@ -494,27 +499,61 @@ int cio_create_sch_lock(struct subchannel *sch)
494 return 0; 499 return 0;
495} 500}
496 501
497/* 502static int cio_check_devno_blacklisted(struct subchannel *sch)
498 * cio_validate_subchannel() 503{
504 if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
505 /*
506 * This device must not be known to Linux. So we simply
507 * say that there is no device and return ENODEV.
508 */
509 CIO_MSG_EVENT(6, "Blacklisted device detected "
510 "at devno %04X, subchannel set %x\n",
511 sch->schib.pmcw.dev, sch->schid.ssid);
512 return -ENODEV;
513 }
514 return 0;
515}
516
517static int cio_validate_io_subchannel(struct subchannel *sch)
518{
519 /* Initialization for io subchannels. */
520 if (!css_sch_is_valid(&sch->schib))
521 return -ENODEV;
522
523 /* Devno is valid. */
524 return cio_check_devno_blacklisted(sch);
525}
526
527static int cio_validate_msg_subchannel(struct subchannel *sch)
528{
529 /* Initialization for message subchannels. */
530 if (!css_sch_is_valid(&sch->schib))
531 return -ENODEV;
532
533 /* Devno is valid. */
534 return cio_check_devno_blacklisted(sch);
535}
536
537/**
538 * cio_validate_subchannel - basic validation of subchannel
539 * @sch: subchannel structure to be filled out
540 * @schid: subchannel id
499 * 541 *
500 * Find out subchannel type and initialize struct subchannel. 542 * Find out subchannel type and initialize struct subchannel.
501 * Return codes: 543 * Return codes:
502 * SUBCHANNEL_TYPE_IO for a normal io subchannel 544 * 0 on success
503 * SUBCHANNEL_TYPE_CHSC for a chsc subchannel
504 * SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
505 * SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
506 * -ENXIO for non-defined subchannels 545 * -ENXIO for non-defined subchannels
507 * -ENODEV for subchannels with invalid device number or blacklisted devices 546 * -ENODEV for invalid subchannels or blacklisted devices
547 * -EIO for subchannels in an invalid subchannel set
508 */ 548 */
509int 549int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
510cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
511{ 550{
512 char dbf_txt[15]; 551 char dbf_txt[15];
513 int ccode; 552 int ccode;
514 int err; 553 int err;
515 554
516 sprintf (dbf_txt, "valsch%x", schid.sch_no); 555 sprintf(dbf_txt, "valsch%x", schid.sch_no);
517 CIO_TRACE_EVENT (4, dbf_txt); 556 CIO_TRACE_EVENT(4, dbf_txt);
518 557
519 /* Nuke all fields. */ 558 /* Nuke all fields. */
520 memset(sch, 0, sizeof(struct subchannel)); 559 memset(sch, 0, sizeof(struct subchannel));
@@ -546,67 +585,21 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
546 /* Copy subchannel type from path management control word. */ 585 /* Copy subchannel type from path management control word. */
547 sch->st = sch->schib.pmcw.st; 586 sch->st = sch->schib.pmcw.st;
548 587
549 /* 588 switch (sch->st) {
550 * ... just being curious we check for non I/O subchannels 589 case SUBCHANNEL_TYPE_IO:
551 */ 590 err = cio_validate_io_subchannel(sch);
552 if (sch->st != 0) { 591 break;
553 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports " 592 case SUBCHANNEL_TYPE_MSG:
554 "non-I/O subchannel type %04X\n", 593 err = cio_validate_msg_subchannel(sch);
555 sch->schid.ssid, sch->schid.sch_no, sch->st); 594 break;
556 /* We stop here for non-io subchannels. */ 595 default:
557 err = sch->st; 596 err = 0;
558 goto out;
559 }
560
561 /* Initialization for io subchannels. */
562 if (!css_sch_is_valid(&sch->schib)) {
563 err = -ENODEV;
564 goto out;
565 } 597 }
566 598 if (err)
567 /* Devno is valid. */
568 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
569 /*
570 * This device must not be known to Linux. So we simply
571 * say that there is no device and return ENODEV.
572 */
573 CIO_MSG_EVENT(6, "Blacklisted device detected "
574 "at devno %04X, subchannel set %x\n",
575 sch->schib.pmcw.dev, sch->schid.ssid);
576 err = -ENODEV;
577 goto out; 599 goto out;
578 }
579 if (cio_is_console(sch->schid)) {
580 sch->opm = 0xff;
581 sch->isc = 1;
582 } else {
583 sch->opm = chp_get_sch_opm(sch);
584 sch->isc = 3;
585 }
586 sch->lpm = sch->schib.pmcw.pam & sch->opm;
587
588 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X "
589 "- PIM = %02X, PAM = %02X, POM = %02X\n",
590 sch->schib.pmcw.dev, sch->schid.ssid,
591 sch->schid.sch_no, sch->schib.pmcw.pim,
592 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
593 600
594 /* 601 CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
595 * We now have to initially ... 602 sch->schid.ssid, sch->schid.sch_no, sch->st);
596 * ... enable "concurrent sense"
597 * ... enable "multipath mode" if more than one
598 * CHPID is available. This is done regardless
599 * whether multiple paths are available for us.
600 */
601 sch->schib.pmcw.csense = 1; /* concurrent sense */
602 sch->schib.pmcw.ena = 0;
603 if ((sch->lpm & (sch->lpm - 1)) != 0)
604 sch->schib.pmcw.mp = 1; /* multipath mode */
605 /* clean up possible residual cmf stuff */
606 sch->schib.pmcw.mme = 0;
607 sch->schib.pmcw.mbfc = 0;
608 sch->schib.pmcw.mbi = 0;
609 sch->schib.mba = 0;
610 return 0; 603 return 0;
611out: 604out:
612 if (!cio_is_console(schid)) 605 if (!cio_is_console(schid))
@@ -647,7 +640,7 @@ do_IRQ (struct pt_regs *regs)
647 */ 640 */
648 if (tpi_info->adapter_IO == 1 && 641 if (tpi_info->adapter_IO == 1 &&
649 tpi_info->int_type == IO_INTERRUPT_TYPE) { 642 tpi_info->int_type == IO_INTERRUPT_TYPE) {
650 do_adapter_IO(); 643 do_adapter_IO(tpi_info->isc);
651 continue; 644 continue;
652 } 645 }
653 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 646 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
@@ -706,9 +699,9 @@ void wait_cons_dev(void)
706 if (!console_subchannel_in_use) 699 if (!console_subchannel_in_use)
707 return; 700 return;
708 701
709 /* disable all but isc 1 (console device) */ 702 /* disable all but the console isc */
710 __ctl_store (save_cr6, 6, 6); 703 __ctl_store (save_cr6, 6, 6);
711 cr6 = 0x40000000; 704 cr6 = 1UL << (31 - CONSOLE_ISC);
712 __ctl_load (cr6, 6, 6); 705 __ctl_load (cr6, 6, 6);
713 706
714 do { 707 do {
@@ -716,7 +709,7 @@ void wait_cons_dev(void)
716 if (!cio_tpi()) 709 if (!cio_tpi())
717 cpu_relax(); 710 cpu_relax();
718 spin_lock(console_subchannel.lock); 711 spin_lock(console_subchannel.lock);
719 } while (console_subchannel.schib.scsw.actl != 0); 712 } while (console_subchannel.schib.scsw.cmd.actl != 0);
720 /* 713 /*
721 * restore previous isc value 714 * restore previous isc value
722 */ 715 */
@@ -761,7 +754,6 @@ cio_get_console_sch_no(void)
761 /* unlike in 2.4, we cannot autoprobe here, since 754 /* unlike in 2.4, we cannot autoprobe here, since
762 * the channel subsystem is not fully initialized. 755 * the channel subsystem is not fully initialized.
763 * With some luck, the HWC console can take over */ 756 * With some luck, the HWC console can take over */
764 printk(KERN_WARNING "cio: No ccw console found!\n");
765 return -1; 757 return -1;
766 } 758 }
767 return console_irq; 759 return console_irq;
@@ -778,6 +770,7 @@ cio_probe_console(void)
778 sch_no = cio_get_console_sch_no(); 770 sch_no = cio_get_console_sch_no();
779 if (sch_no == -1) { 771 if (sch_no == -1) {
780 console_subchannel_in_use = 0; 772 console_subchannel_in_use = 0;
773 printk(KERN_WARNING "cio: No ccw console found!\n");
781 return ERR_PTR(-ENODEV); 774 return ERR_PTR(-ENODEV);
782 } 775 }
783 memset(&console_subchannel, 0, sizeof(struct subchannel)); 776 memset(&console_subchannel, 0, sizeof(struct subchannel));
@@ -790,15 +783,15 @@ cio_probe_console(void)
790 } 783 }
791 784
792 /* 785 /*
793 * enable console I/O-interrupt subclass 1 786 * enable console I/O-interrupt subclass
794 */ 787 */
795 ctl_set_bit(6, 30); 788 isc_register(CONSOLE_ISC);
796 console_subchannel.isc = 1; 789 console_subchannel.schib.pmcw.isc = CONSOLE_ISC;
797 console_subchannel.schib.pmcw.isc = 1;
798 console_subchannel.schib.pmcw.intparm = 790 console_subchannel.schib.pmcw.intparm =
799 (u32)(addr_t)&console_subchannel; 791 (u32)(addr_t)&console_subchannel;
800 ret = cio_modify(&console_subchannel); 792 ret = cio_modify(&console_subchannel);
801 if (ret) { 793 if (ret) {
794 isc_unregister(CONSOLE_ISC);
802 console_subchannel_in_use = 0; 795 console_subchannel_in_use = 0;
803 return ERR_PTR(ret); 796 return ERR_PTR(ret);
804 } 797 }
@@ -810,7 +803,7 @@ cio_release_console(void)
810{ 803{
811 console_subchannel.schib.pmcw.intparm = 0; 804 console_subchannel.schib.pmcw.intparm = 0;
812 cio_modify(&console_subchannel); 805 cio_modify(&console_subchannel);
813 ctl_clear_bit(6, 24); 806 isc_unregister(CONSOLE_ISC);
814 console_subchannel_in_use = 0; 807 console_subchannel_in_use = 0;
815} 808}
816 809
@@ -864,7 +857,7 @@ static void udelay_reset(unsigned long usecs)
864} 857}
865 858
866static int 859static int
867__clear_subchannel_easy(struct subchannel_id schid) 860__clear_io_subchannel_easy(struct subchannel_id schid)
868{ 861{
869 int retry; 862 int retry;
870 863
@@ -883,6 +876,12 @@ __clear_subchannel_easy(struct subchannel_id schid)
883 return -EBUSY; 876 return -EBUSY;
884} 877}
885 878
879static void __clear_chsc_subchannel_easy(void)
880{
881 /* It seems we can only wait for a bit here :/ */
882 udelay_reset(100);
883}
884
886static int pgm_check_occured; 885static int pgm_check_occured;
887 886
888static void cio_reset_pgm_check_handler(void) 887static void cio_reset_pgm_check_handler(void)
@@ -921,11 +920,22 @@ static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
921 case -ENODEV: 920 case -ENODEV:
922 break; 921 break;
923 default: /* -EBUSY */ 922 default: /* -EBUSY */
924 if (__clear_subchannel_easy(schid)) 923 switch (schib.pmcw.st) {
925 break; /* give up... */ 924 case SUBCHANNEL_TYPE_IO:
925 if (__clear_io_subchannel_easy(schid))
926 goto out; /* give up... */
927 break;
928 case SUBCHANNEL_TYPE_CHSC:
929 __clear_chsc_subchannel_easy();
930 break;
931 default:
932 /* No default clear strategy */
933 break;
934 }
926 stsch(schid, &schib); 935 stsch(schid, &schib);
927 __disable_subchannel_easy(schid, &schib); 936 __disable_subchannel_easy(schid, &schib);
928 } 937 }
938out:
929 return 0; 939 return 0;
930} 940}
931 941
@@ -1068,3 +1078,61 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 iplinfo->is_qdio = schib.pmcw.qf; 1078 iplinfo->is_qdio = schib.pmcw.qf;
1069 return 0; 1079 return 0;
1070} 1080}
1081
1082/**
1083 * cio_tm_start_key - perform start function
1084 * @sch: subchannel on which to perform the start function
1085 * @tcw: transport-command word to be started
1086 * @lpm: mask of paths to use
1087 * @key: storage key to use for storage access
1088 *
1089 * Start the tcw on the given subchannel. Return zero on success, non-zero
1090 * otherwise.
1091 */
1092int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
1093{
1094 int cc;
1095 union orb *orb = &to_io_private(sch)->orb;
1096
1097 memset(orb, 0, sizeof(union orb));
1098 orb->tm.intparm = (u32) (addr_t) sch;
1099 orb->tm.key = key >> 4;
1100 orb->tm.b = 1;
1101 orb->tm.lpm = lpm ? lpm : sch->lpm;
1102 orb->tm.tcw = (u32) (addr_t) tcw;
1103 cc = ssch(sch->schid, orb);
1104 switch (cc) {
1105 case 0:
1106 return 0;
1107 case 1:
1108 case 2:
1109 return -EBUSY;
1110 default:
1111 return cio_start_handle_notoper(sch, lpm);
1112 }
1113}
1114
1115/**
1116 * cio_tm_intrg - perform interrogate function
1117 * @sch: subchannel on which to perform the interrogate function
1118 *
1119 * If the specified subchannel is running in transport-mode, perform the
1120 * interrogate function. Return zero on success, non-zero otherwise.
1121 */
1122int cio_tm_intrg(struct subchannel *sch)
1123{
1124 int cc;
1125
1126 if (!to_io_private(sch)->orb.tm.b)
1127 return -EINVAL;
1128 cc = xsch(sch->schid);
1129 switch (cc) {
1130 case 0:
1131 case 2:
1132 return 0;
1133 case 1:
1134 return -EBUSY;
1135 default:
1136 return -ENODEV;
1137 }
1138}
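
A hypothetical caller of the new transport-mode entry points might look like the sketch below. Passing lpm=0 falls back to sch->lpm as shown above, and PAGE_DEFAULT_KEY matches the key >> 4 convention in cio_tm_start_key(); the -EBUSY recovery via cio_tm_intrg() is purely illustrative:

static int submit_tcw(struct subchannel *sch, struct tcw *tcw)
{
	int ret;

	spin_lock_irq(sch->lock);
	/* lpm 0 means "use all available paths on this subchannel". */
	ret = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
	if (ret == -EBUSY)
		ret = cio_tm_intrg(sch);	/* hypothetical recovery step */
	spin_unlock_irq(sch->lock);
	return ret;
}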
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 6e933aebe013..3b236d20e835 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -3,9 +3,12 @@
3 3
4#include <linux/mutex.h> 4#include <linux/mutex.h>
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h>
6#include <asm/chpid.h> 7#include <asm/chpid.h>
8#include <asm/cio.h>
9#include <asm/fcx.h>
10#include <asm/schid.h>
7#include "chsc.h" 11#include "chsc.h"
8#include "schid.h"
9 12
10/* 13/*
11 * path management control word 14 * path management control word
@@ -13,7 +16,7 @@
13struct pmcw { 16struct pmcw {
14 u32 intparm; /* interruption parameter */ 17 u32 intparm; /* interruption parameter */
15 u32 qf : 1; /* qdio facility */ 18 u32 qf : 1; /* qdio facility */
16 u32 res0 : 1; /* reserved zeros */ 19 u32 w : 1;
17 u32 isc : 3; /* interruption subclass */ 20 u32 isc : 3; /* interruption subclass */
18 u32 res5 : 3; /* reserved zeros */ 21 u32 res5 : 3; /* reserved zeros */
19 u32 ena : 1; /* enabled */ 22 u32 ena : 1; /* enabled */
@@ -47,7 +50,7 @@ struct pmcw {
47 */ 50 */
48struct schib { 51struct schib {
49 struct pmcw pmcw; /* path management control word */ 52 struct pmcw pmcw; /* path management control word */
50 struct scsw scsw; /* subchannel status word */ 53 union scsw scsw; /* subchannel status word */
51 __u64 mba; /* measurement block address */ 54 __u64 mba; /* measurement block address */
52 __u8 mda[4]; /* model dependent area */ 55 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 56} __attribute__ ((packed,aligned(4)));
@@ -99,8 +102,11 @@ extern int cio_set_options (struct subchannel *, int);
99extern int cio_get_options (struct subchannel *); 102extern int cio_get_options (struct subchannel *);
100extern int cio_modify (struct subchannel *); 103extern int cio_modify (struct subchannel *);
101 104
105int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
106int cio_tm_intrg(struct subchannel *sch);
107
102int cio_create_sch_lock(struct subchannel *); 108int cio_create_sch_lock(struct subchannel *);
103void do_adapter_IO(void); 109void do_adapter_IO(u8 isc);
104void do_IRQ(struct pt_regs *); 110void do_IRQ(struct pt_regs *);
105 111
106/* Use with care. */ 112/* Use with care. */
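
The isc_register()/isc_unregister() calls used throughout cio.c manipulate I/O-interruption subclass mask bits in control register 6. A sketch of the bit mapping, matching the console mask computed in wait_cons_dev() above:

/* CR6 mask bit for an I/O-interruption subclass (valid for isc 0-7). */
static inline unsigned long isc_cr6_mask(u8 isc)
{
	return 1UL << (31 - isc);
}

Assuming CONSOLE_ISC stays 1, isc_cr6_mask(CONSOLE_ISC) reproduces the 0x40000000 value that was previously hard-coded.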
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2808b6833b9e..a90b28c0be57 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -341,12 +341,12 @@ static int cmf_copy_block(struct ccw_device *cdev)
341 if (stsch(sch->schid, &sch->schib)) 341 if (stsch(sch->schid, &sch->schib))
342 return -ENODEV; 342 return -ENODEV;
343 343
344 if (sch->schib.scsw.fctl & SCSW_FCTL_START_FUNC) { 344 if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
345 /* Don't copy if a start function is in progress. */ 345 /* Don't copy if a start function is in progress. */
346 if ((!(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) && 346 if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
347 (sch->schib.scsw.actl & 347 (scsw_actl(&sch->schib.scsw) &
348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) && 348 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
349 (!(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS))) 349 (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
350 return -EBUSY; 350 return -EBUSY;
351 } 351 }
352 cmb_data = cdev->private->cmb; 352 cmb_data = cdev->private->cmb;
@@ -612,9 +612,6 @@ static int alloc_cmb(struct ccw_device *cdev)
612 free_pages((unsigned long)mem, get_order(size)); 612 free_pages((unsigned long)mem, get_order(size));
613 } else if (!mem) { 613 } else if (!mem) {
614 /* no luck */ 614 /* no luck */
615 printk(KERN_WARNING "cio: failed to allocate area "
616 "for measuring %d subchannels\n",
617 cmb_area.num_channels);
618 ret = -ENOMEM; 615 ret = -ENOMEM;
619 goto out; 616 goto out;
620 } else { 617 } else {
@@ -1230,13 +1227,9 @@ static ssize_t cmb_enable_store(struct device *dev,
1230 switch (val) { 1227 switch (val) {
1231 case 0: 1228 case 0:
1232 ret = disable_cmf(cdev); 1229 ret = disable_cmf(cdev);
1233 if (ret)
1234 dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
1235 break; 1230 break;
1236 case 1: 1231 case 1:
1237 ret = enable_cmf(cdev); 1232 ret = enable_cmf(cdev);
1238 if (ret && ret != -EBUSY)
1239 dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
1240 break; 1233 break;
1241 } 1234 }
1242 1235
@@ -1344,8 +1337,7 @@ static int __init init_cmf(void)
1344 * to basic mode. 1337 * to basic mode.
1345 */ 1338 */
1346 if (format == CMF_AUTODETECT) { 1339 if (format == CMF_AUTODETECT) {
1347 if (!css_characteristics_avail || 1340 if (!css_general_characteristics.ext_mb) {
1348 !css_general_characteristics.ext_mb) {
1349 format = CMF_BASIC; 1341 format = CMF_BASIC;
1350 } else { 1342 } else {
1351 format = CMF_EXTENDED; 1343 format = CMF_EXTENDED;
@@ -1365,8 +1357,6 @@ static int __init init_cmf(void)
1365 cmbops = &cmbops_extended; 1357 cmbops = &cmbops_extended;
1366 break; 1358 break;
1367 default: 1359 default:
1368 printk(KERN_ERR "cio: Invalid format %d for channel "
1369 "measurement facility\n", format);
1370 return 1; 1360 return 1;
1371 } 1361 }
1372 1362
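
The conversions above all follow the same shape; factored out, the busy test from cmf_copy_block() reads as below. This is a sketch for illustration, not a helper the patch adds — the accessors hide whether the union scsw currently holds a command-mode or a transport-mode word:

static int cmf_block_busy(struct subchannel *sch)
{
	union scsw *scsw = &sch->schib.scsw;

	/* A start function is in progress and not suspended or concluded. */
	return (scsw_fctl(scsw) & SCSW_FCTL_START_FUNC) &&
	       !(scsw_actl(scsw) & SCSW_ACTL_SUSPENDED) &&
	       (scsw_actl(scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
	       !(scsw_stctl(scsw) & SCSW_STCTL_SEC_STATUS);
}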
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a76956512b2d..46c021d880dc 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/css.c 2 * drivers/s390/cio/css.c
3 * driver for channel subsystem 3 * driver for channel subsystem
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 */ 8 */
@@ -14,7 +13,9 @@
14#include <linux/errno.h> 13#include <linux/errno.h>
15#include <linux/list.h> 14#include <linux/list.h>
16#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <asm/isc.h>
17 17
18#include "../s390mach.h"
18#include "css.h" 19#include "css.h"
19#include "cio.h" 20#include "cio.h"
20#include "cio_debug.h" 21#include "cio_debug.h"
@@ -30,8 +31,6 @@ static int max_ssid = 0;
30 31
31struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1]; 32struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
32 33
33int css_characteristics_avail = 0;
34
35int 34int
36for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) 35for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
37{ 36{
@@ -121,25 +120,6 @@ css_alloc_subchannel(struct subchannel_id schid)
121 kfree(sch); 120 kfree(sch);
122 return ERR_PTR(ret); 121 return ERR_PTR(ret);
123 } 122 }
124
125 if (sch->st != SUBCHANNEL_TYPE_IO) {
126 /* For now we ignore all non-io subchannels. */
127 kfree(sch);
128 return ERR_PTR(-EINVAL);
129 }
130
131 /*
132 * Set intparm to subchannel address.
133 * This is fine even on 64bit since the subchannel is always located
134 * under 2G.
135 */
136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
137 ret = cio_modify(sch);
138 if (ret) {
139 kfree(sch->lock);
140 kfree(sch);
141 return ERR_PTR(ret);
142 }
143 return sch; 123 return sch;
144} 124}
145 125
@@ -177,12 +157,18 @@ static int css_sch_device_register(struct subchannel *sch)
177 return ret; 157 return ret;
178} 158}
179 159
160/**
161 * css_sch_device_unregister - unregister a subchannel
162 * @sch: subchannel to be unregistered
163 */
180void css_sch_device_unregister(struct subchannel *sch) 164void css_sch_device_unregister(struct subchannel *sch)
181{ 165{
182 mutex_lock(&sch->reg_mutex); 166 mutex_lock(&sch->reg_mutex);
183 device_unregister(&sch->dev); 167 if (device_is_registered(&sch->dev))
168 device_unregister(&sch->dev);
184 mutex_unlock(&sch->reg_mutex); 169 mutex_unlock(&sch->reg_mutex);
185} 170}
171EXPORT_SYMBOL_GPL(css_sch_device_unregister);
186 172
187static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw) 173static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
188{ 174{
@@ -229,6 +215,41 @@ void css_update_ssd_info(struct subchannel *sch)
229 } 215 }
230} 216}
231 217
218static ssize_t type_show(struct device *dev, struct device_attribute *attr,
219 char *buf)
220{
221 struct subchannel *sch = to_subchannel(dev);
222
223 return sprintf(buf, "%01x\n", sch->st);
224}
225
226static DEVICE_ATTR(type, 0444, type_show, NULL);
227
228static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
229 char *buf)
230{
231 struct subchannel *sch = to_subchannel(dev);
232
233 return sprintf(buf, "css:t%01X\n", sch->st);
234}
235
236static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
237
238static struct attribute *subch_attrs[] = {
239 &dev_attr_type.attr,
240 &dev_attr_modalias.attr,
241 NULL,
242};
243
244static struct attribute_group subch_attr_group = {
245 .attrs = subch_attrs,
246};
247
248static struct attribute_group *default_subch_attr_groups[] = {
249 &subch_attr_group,
250 NULL,
251};
252
232static int css_register_subchannel(struct subchannel *sch) 253static int css_register_subchannel(struct subchannel *sch)
233{ 254{
234 int ret; 255 int ret;
@@ -237,16 +258,17 @@ static int css_register_subchannel(struct subchannel *sch)
237 sch->dev.parent = &channel_subsystems[0]->device; 258 sch->dev.parent = &channel_subsystems[0]->device;
238 sch->dev.bus = &css_bus_type; 259 sch->dev.bus = &css_bus_type;
239 sch->dev.release = &css_subchannel_release; 260 sch->dev.release = &css_subchannel_release;
240 sch->dev.groups = subch_attr_groups; 261 sch->dev.groups = default_subch_attr_groups;
241 /* 262 /*
242 * We don't want to generate uevents for I/O subchannels that don't 263 * We don't want to generate uevents for I/O subchannels that don't
243 * have a working ccw device behind them since they will be 264 * have a working ccw device behind them since they will be
244 * unregistered before they can be used anyway, so we delay the add 265 * unregistered before they can be used anyway, so we delay the add
245 * uevent until after device recognition was successful. 266 * uevent until after device recognition was successful.
267 * Note that we suppress the uevent for all subchannel types;
268 * the subchannel driver can decide itself when it wants to inform
269 * userspace of its existence.
246 */ 270 */
247 if (!cio_is_console(sch->schid)) 271 sch->dev.uevent_suppress = 1;
248 /* Console is special, no need to suppress. */
249 sch->dev.uevent_suppress = 1;
250 css_update_ssd_info(sch); 272 css_update_ssd_info(sch);
251 /* make it known to the system */ 273 /* make it known to the system */
252 ret = css_sch_device_register(sch); 274 ret = css_sch_device_register(sch);
@@ -255,10 +277,19 @@ static int css_register_subchannel(struct subchannel *sch)
255 sch->schid.ssid, sch->schid.sch_no, ret); 277 sch->schid.ssid, sch->schid.sch_no, ret);
256 return ret; 278 return ret;
257 } 279 }
280 if (!sch->driver) {
281 /*
282 * No driver matched. Generate the uevent now so that
283 * a fitting driver module may be loaded based on the
284 * modalias.
285 */
286 sch->dev.uevent_suppress = 0;
287 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
288 }
258 return ret; 289 return ret;
259} 290}
260 291
261static int css_probe_device(struct subchannel_id schid) 292int css_probe_device(struct subchannel_id schid)
262{ 293{
263 int ret; 294 int ret;
264 struct subchannel *sch; 295 struct subchannel *sch;
@@ -301,116 +332,12 @@ int css_sch_is_valid(struct schib *schib)
301{ 332{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv) 333 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0; 334 return 0;
335 if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
336 return 0;
304 return 1; 337 return 1;
305} 338}
306EXPORT_SYMBOL_GPL(css_sch_is_valid); 339EXPORT_SYMBOL_GPL(css_sch_is_valid);
307 340
308static int css_get_subchannel_status(struct subchannel *sch)
309{
310 struct schib schib;
311
312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
315 return CIO_GONE;
316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
317 return CIO_REVALIDATE;
318 if (!sch->lpm)
319 return CIO_NO_PATH;
320 return CIO_OPER;
321}
322
323static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
324{
325 int event, ret, disc;
326 unsigned long flags;
327 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
328
329 spin_lock_irqsave(sch->lock, flags);
330 disc = device_is_disconnected(sch);
331 if (disc && slow) {
332 /* Disconnected devices are evaluated directly only.*/
333 spin_unlock_irqrestore(sch->lock, flags);
334 return 0;
335 }
336 /* No interrupt after machine check - kill pending timers. */
337 device_kill_pending_timer(sch);
338 if (!disc && !slow) {
339 /* Non-disconnected devices are evaluated on the slow path. */
340 spin_unlock_irqrestore(sch->lock, flags);
341 return -EAGAIN;
342 }
343 event = css_get_subchannel_status(sch);
344 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
345 sch->schid.ssid, sch->schid.sch_no, event,
346 disc ? "disconnected" : "normal",
347 slow ? "slow" : "fast");
348 /* Analyze subchannel status. */
349 action = NONE;
350 switch (event) {
351 case CIO_NO_PATH:
352 if (disc) {
353 /* Check if paths have become available. */
354 action = REPROBE;
355 break;
356 }
357 /* fall through */
358 case CIO_GONE:
359 /* Prevent unwanted effects when opening lock. */
360 cio_disable_subchannel(sch);
361 device_set_disconnected(sch);
362 /* Ask driver what to do with device. */
363 action = UNREGISTER;
364 if (sch->driver && sch->driver->notify) {
365 spin_unlock_irqrestore(sch->lock, flags);
366 ret = sch->driver->notify(sch, event);
367 spin_lock_irqsave(sch->lock, flags);
368 if (ret)
369 action = NONE;
370 }
371 break;
372 case CIO_REVALIDATE:
373 /* Device will be removed, so no notify necessary. */
374 if (disc)
375 /* Reprobe because immediate unregister might block. */
376 action = REPROBE;
377 else
378 action = UNREGISTER_PROBE;
379 break;
380 case CIO_OPER:
381 if (disc)
382 /* Get device operational again. */
383 action = REPROBE;
384 break;
385 }
386 /* Perform action. */
387 ret = 0;
388 switch (action) {
389 case UNREGISTER:
390 case UNREGISTER_PROBE:
391 /* Unregister device (will use subchannel lock). */
392 spin_unlock_irqrestore(sch->lock, flags);
393 css_sch_device_unregister(sch);
394 spin_lock_irqsave(sch->lock, flags);
395
396 /* Reset intparm to zeroes. */
397 sch->schib.pmcw.intparm = 0;
398 cio_modify(sch);
399 break;
400 case REPROBE:
401 device_trigger_reprobe(sch);
402 break;
403 default:
404 break;
405 }
406 spin_unlock_irqrestore(sch->lock, flags);
407 /* Probe if necessary. */
408 if (action == UNREGISTER_PROBE)
409 ret = css_probe_device(sch->schid);
410
411 return ret;
412}
413
414static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) 341static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
415{ 342{
416 struct schib schib; 343 struct schib schib;
@@ -429,6 +356,21 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
429 return css_probe_device(schid); 356 return css_probe_device(schid);
430} 357}
431 358
359static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
360{
361 int ret = 0;
362
363 if (sch->driver) {
364 if (sch->driver->sch_event)
365 ret = sch->driver->sch_event(sch, slow);
366 else
367 dev_dbg(&sch->dev,
368 "Got subchannel machine check but "
369 "no sch_event handler provided.\n");
370 }
371 return ret;
372}
373
432static void css_evaluate_subchannel(struct subchannel_id schid, int slow) 374static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
433{ 375{
434 struct subchannel *sch; 376 struct subchannel *sch;
@@ -596,18 +538,29 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
596/* 538/*
597 * Called from the machine check handler for subchannel report words. 539 * Called from the machine check handler for subchannel report words.
598 */ 540 */
599void css_process_crw(int rsid1, int rsid2) 541static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
600{ 542{
601 struct subchannel_id mchk_schid; 543 struct subchannel_id mchk_schid;
602 544
603 CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n", 545 if (overflow) {
604 rsid1, rsid2); 546 css_schedule_eval_all();
547 return;
548 }
549 CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
550 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
551 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
552 crw0->erc, crw0->rsid);
553 if (crw1)
554 CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
555 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
556 crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
557 crw1->anc, crw1->erc, crw1->rsid);
605 init_subchannel_id(&mchk_schid); 558 init_subchannel_id(&mchk_schid);
606 mchk_schid.sch_no = rsid1; 559 mchk_schid.sch_no = crw0->rsid;
607 if (rsid2 != 0) 560 if (crw1)
608 mchk_schid.ssid = (rsid2 >> 8) & 3; 561 mchk_schid.ssid = (crw1->rsid >> 8) & 3;
609 562
610 /* 563 /*
611 * Since we are always presented with IPI in the CRW, we have to 564 * Since we are always presented with IPI in the CRW, we have to
612 * use stsch() to find out if the subchannel in question has come 565 * use stsch() to find out if the subchannel in question has come
613 * or gone. 566 * or gone.
@@ -658,7 +611,7 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
658static void __init 611static void __init
659css_generate_pgid(struct channel_subsystem *css, u32 tod_high) 612css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
660{ 613{
661 if (css_characteristics_avail && css_general_characteristics.mcss) { 614 if (css_general_characteristics.mcss) {
662 css->global_pgid.pgid_high.ext_cssid.version = 0x80; 615 css->global_pgid.pgid_high.ext_cssid.version = 0x80;
663 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid; 616 css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
664 } else { 617 } else {
@@ -795,8 +748,6 @@ init_channel_subsystem (void)
795 ret = chsc_determine_css_characteristics(); 748 ret = chsc_determine_css_characteristics();
796 if (ret == -ENOMEM) 749 if (ret == -ENOMEM)
797 goto out; /* No need to continue. */ 750 goto out; /* No need to continue. */
798 if (ret == 0)
799 css_characteristics_avail = 1;
800 751
801 ret = chsc_alloc_sei_area(); 752 ret = chsc_alloc_sei_area();
802 if (ret) 753 if (ret)
@@ -806,6 +757,10 @@ init_channel_subsystem (void)
806 if (ret) 757 if (ret)
807 goto out; 758 goto out;
808 759
760 ret = s390_register_crw_handler(CRW_RSC_SCH, css_process_crw);
761 if (ret)
762 goto out;
763
809 if ((ret = bus_register(&css_bus_type))) 764 if ((ret = bus_register(&css_bus_type)))
810 goto out; 765 goto out;
811 766
@@ -836,8 +791,7 @@ init_channel_subsystem (void)
836 ret = device_register(&css->device); 791 ret = device_register(&css->device);
837 if (ret) 792 if (ret)
838 goto out_free_all; 793 goto out_free_all;
839 if (css_characteristics_avail && 794 if (css_chsc_characteristics.secm) {
840 css_chsc_characteristics.secm) {
841 ret = device_create_file(&css->device, 795 ret = device_create_file(&css->device,
842 &dev_attr_cm_enable); 796 &dev_attr_cm_enable);
843 if (ret) 797 if (ret)
@@ -852,7 +806,8 @@ init_channel_subsystem (void)
852 goto out_pseudo; 806 goto out_pseudo;
853 css_init_done = 1; 807 css_init_done = 1;
854 808
855 ctl_set_bit(6, 28); 809 /* Enable default isc for I/O subchannels. */
810 isc_register(IO_SCH_ISC);
856 811
857 for_each_subchannel(__init_channel_subsystem, NULL); 812 for_each_subchannel(__init_channel_subsystem, NULL);
858 return 0; 813 return 0;
@@ -875,7 +830,7 @@ out_unregister:
875 i--; 830 i--;
876 css = channel_subsystems[i]; 831 css = channel_subsystems[i];
877 device_unregister(&css->pseudo_subchannel->dev); 832 device_unregister(&css->pseudo_subchannel->dev);
878 if (css_characteristics_avail && css_chsc_characteristics.secm) 833 if (css_chsc_characteristics.secm)
879 device_remove_file(&css->device, 834 device_remove_file(&css->device,
880 &dev_attr_cm_enable); 835 &dev_attr_cm_enable);
881 device_unregister(&css->device); 836 device_unregister(&css->device);
@@ -883,6 +838,7 @@ out_unregister:
883out_bus: 838out_bus:
884 bus_unregister(&css_bus_type); 839 bus_unregister(&css_bus_type);
885out: 840out:
841 s390_unregister_crw_handler(CRW_RSC_CSS);
886 chsc_free_sei_area(); 842 chsc_free_sei_area();
887 kfree(slow_subchannel_set); 843 kfree(slow_subchannel_set);
888 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", 844 printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n",
@@ -895,19 +851,16 @@ int sch_is_pseudo_sch(struct subchannel *sch)
895 return sch == to_css(sch->dev.parent)->pseudo_subchannel; 851 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
896} 852}
897 853
898/* 854static int css_bus_match(struct device *dev, struct device_driver *drv)
899 * find a driver for a subchannel. They identify by the subchannel
900 * type with the exception that the console subchannel driver has its own
901 * subchannel type although the device is an i/o subchannel
902 */
903static int
904css_bus_match (struct device *dev, struct device_driver *drv)
905{ 855{
906 struct subchannel *sch = to_subchannel(dev); 856 struct subchannel *sch = to_subchannel(dev);
907 struct css_driver *driver = to_cssdriver(drv); 857 struct css_driver *driver = to_cssdriver(drv);
858 struct css_device_id *id;
908 859
909 if (sch->st == driver->subchannel_type) 860 for (id = driver->subchannel_type; id->match_flags; id++) {
910 return 1; 861 if (sch->st == id->type)
862 return 1;
863 }
911 864
912 return 0; 865 return 0;
913} 866}
@@ -945,12 +898,25 @@ static void css_shutdown(struct device *dev)
945 sch->driver->shutdown(sch); 898 sch->driver->shutdown(sch);
946} 899}
947 900
901static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
902{
903 struct subchannel *sch = to_subchannel(dev);
904 int ret;
905
906 ret = add_uevent_var(env, "ST=%01X", sch->st);
907 if (ret)
908 return ret;
909 ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
910 return ret;
911}
912
948struct bus_type css_bus_type = { 913struct bus_type css_bus_type = {
949 .name = "css", 914 .name = "css",
950 .match = css_bus_match, 915 .match = css_bus_match,
951 .probe = css_probe, 916 .probe = css_probe,
952 .remove = css_remove, 917 .remove = css_remove,
953 .shutdown = css_shutdown, 918 .shutdown = css_shutdown,
919 .uevent = css_uevent,
954}; 920};
955 921
956/** 922/**
@@ -985,4 +951,3 @@ subsys_initcall(init_channel_subsystem);
985 951
986MODULE_LICENSE("GPL"); 952MODULE_LICENSE("GPL");
987EXPORT_SYMBOL(css_bus_type); 953EXPORT_SYMBOL(css_bus_type);
988EXPORT_SYMBOL_GPL(css_characteristics_avail);
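
With the subchannel type and modalias now exported, a driver for a non-I/O subchannel type can be autoloaded on the KOBJ_ADD uevent. A sketch of the matching table such a module would carry, mirroring the io_subchannel_ids table in device.c below (the chsc type value of 1 is an assumption here):

static struct css_device_id chsc_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_CHSC, },	/* type 1, assumed */
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
/* modpost turns this into an "alias css:t1" entry in modules.alias,
 * matching the MODALIAS=css:t%01X string emitted by css_uevent(). */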
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index e1913518f354..57ebf120f825 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -9,8 +9,7 @@
9 9
10#include <asm/cio.h> 10#include <asm/cio.h>
11#include <asm/chpid.h> 11#include <asm/chpid.h>
12 12#include <asm/schid.h>
13#include "schid.h"
14 13
15/* 14/*
16 * path grouping stuff 15 * path grouping stuff
@@ -58,20 +57,28 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 57 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 58} __attribute__ ((packed));
60 59
61/*
62 * A css driver handles all subchannels of one type.
63 * Currently, we only care about I/O subchannels (type 0), these
64 * have a ccw_device connected to them.
65 */
66struct subchannel; 60struct subchannel;
61struct chp_link;
62/**
63 * struct css_driver - device driver for subchannels
64 * @owner: owning module
65 * @subchannel_type: subchannel type supported by this driver
66 * @drv: embedded device driver structure
67 * @irq: called on interrupts
68 * @chp_event: called for events affecting a channel path
69 * @sch_event: called for events affecting the subchannel
70 * @probe: function called on probe
71 * @remove: function called on remove
72 * @shutdown: called at device shutdown
73 * @name: name of the device driver
74 */
67struct css_driver { 75struct css_driver {
68 struct module *owner; 76 struct module *owner;
69 unsigned int subchannel_type; 77 struct css_device_id *subchannel_type;
70 struct device_driver drv; 78 struct device_driver drv;
71 void (*irq)(struct subchannel *); 79 void (*irq)(struct subchannel *);
72 int (*notify)(struct subchannel *, int); 80 int (*chp_event)(struct subchannel *, struct chp_link *, int);
73 void (*verify)(struct subchannel *); 81 int (*sch_event)(struct subchannel *, int);
74 void (*termination)(struct subchannel *);
75 int (*probe)(struct subchannel *); 82 int (*probe)(struct subchannel *);
76 int (*remove)(struct subchannel *); 83 int (*remove)(struct subchannel *);
77 void (*shutdown)(struct subchannel *); 84 void (*shutdown)(struct subchannel *);
@@ -89,13 +96,13 @@ extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *); 96extern void css_driver_unregister(struct css_driver *);
90 97
91extern void css_sch_device_unregister(struct subchannel *); 98extern void css_sch_device_unregister(struct subchannel *);
92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 99extern int css_probe_device(struct subchannel_id);
100extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
93extern int css_init_done; 101extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), 102int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id, 103 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data); 104 void *), void *data);
97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 105extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
98extern void css_process_crw(int, int);
99extern void css_reiterate_subchannels(void); 106extern void css_reiterate_subchannels(void);
100void css_update_ssd_info(struct subchannel *sch); 107void css_update_ssd_info(struct subchannel *sch);
101 108
@@ -121,20 +128,6 @@ struct channel_subsystem {
121extern struct bus_type css_bus_type; 128extern struct bus_type css_bus_type;
122extern struct channel_subsystem *channel_subsystems[]; 129extern struct channel_subsystem *channel_subsystems[];
123 130
124/* Some helper functions for disconnected state. */
125int device_is_disconnected(struct subchannel *);
126void device_set_disconnected(struct subchannel *);
127void device_trigger_reprobe(struct subchannel *);
128
129/* Helper functions for vary on/off. */
130int device_is_online(struct subchannel *);
131void device_kill_io(struct subchannel *);
132void device_set_intretry(struct subchannel *sch);
133int device_trigger_verify(struct subchannel *sch);
134
135/* Machine check helper function. */
136void device_kill_pending_timer(struct subchannel *);
137
138/* Helper functions to build lists for the slow path. */ 131/* Helper functions to build lists for the slow path. */
139void css_schedule_eval(struct subchannel_id schid); 132void css_schedule_eval(struct subchannel_id schid);
140void css_schedule_eval_all(void); 133void css_schedule_eval_all(void);
@@ -145,6 +138,4 @@ int css_sch_is_valid(struct schib *);
145 138
146extern struct workqueue_struct *slow_path_wq; 139extern struct workqueue_struct *slow_path_wq;
147void css_wait_for_slow_path(void); 140void css_wait_for_slow_path(void);
148
149extern struct attribute_group *subch_attr_groups[];
150#endif 141#endif
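
Against the reworked struct css_driver, a minimal driver skeleton might look as follows. The old notify/verify/termination hooks are replaced by sch_event()/chp_event(), which receive the machine-check events that css_evaluate_known_subchannel() now forwards; names and the empty body are illustrative only:

static struct css_device_id example_ids[] = {
	{ .match_flags = 0x1, .type = 0x3, },	/* hypothetical subchannel type */
	{ /* end of list */ },
};

static int example_sch_event(struct subchannel *sch, int slow)
{
	/* Re-evaluate the subchannel after a machine check; a real
	 * handler would stsch() the schib and act on the result. */
	return 0;
}

static struct css_driver example_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = example_ids,
	.name = "example",
	.sch_event = example_sch_event,
};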
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index e22813db74a2..e818d0c54c09 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device.c 2 * drivers/s390/cio/device.c
3 * bus driver for ccw devices 3 * bus driver for ccw devices
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 6 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
8 * Cornelia Huck (cornelia.huck@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com)
9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 * Martin Schwidefsky (schwidefsky@de.ibm.com)
@@ -23,7 +22,9 @@
23#include <asm/cio.h> 22#include <asm/cio.h>
24#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
25#include <asm/cmb.h> 24#include <asm/cmb.h>
25#include <asm/isc.h>
26 26
27#include "chp.h"
27#include "cio.h" 28#include "cio.h"
28#include "cio_debug.h" 29#include "cio_debug.h"
29#include "css.h" 30#include "css.h"
@@ -125,19 +126,24 @@ struct bus_type ccw_bus_type;
125static void io_subchannel_irq(struct subchannel *); 126static void io_subchannel_irq(struct subchannel *);
126static int io_subchannel_probe(struct subchannel *); 127static int io_subchannel_probe(struct subchannel *);
127static int io_subchannel_remove(struct subchannel *); 128static int io_subchannel_remove(struct subchannel *);
128static int io_subchannel_notify(struct subchannel *, int);
129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
131static void io_subchannel_shutdown(struct subchannel *); 129static void io_subchannel_shutdown(struct subchannel *);
130static int io_subchannel_sch_event(struct subchannel *, int);
131static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
132 int);
133
134static struct css_device_id io_subchannel_ids[] = {
135 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
136 { /* end of list */ },
137};
138MODULE_DEVICE_TABLE(css, io_subchannel_ids);
132 139
133static struct css_driver io_subchannel_driver = { 140static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE, 141 .owner = THIS_MODULE,
135 .subchannel_type = SUBCHANNEL_TYPE_IO, 142 .subchannel_type = io_subchannel_ids,
136 .name = "io_subchannel", 143 .name = "io_subchannel",
137 .irq = io_subchannel_irq, 144 .irq = io_subchannel_irq,
138 .notify = io_subchannel_notify, 145 .sch_event = io_subchannel_sch_event,
139 .verify = io_subchannel_verify, 146 .chp_event = io_subchannel_chp_event,
140 .termination = io_subchannel_ioterm,
141 .probe = io_subchannel_probe, 147 .probe = io_subchannel_probe,
142 .remove = io_subchannel_remove, 148 .remove = io_subchannel_remove,
143 .shutdown = io_subchannel_shutdown, 149 .shutdown = io_subchannel_shutdown,
@@ -487,25 +493,22 @@ static int online_store_recog_and_online(struct ccw_device *cdev)
487 ccw_device_set_online(cdev); 493 ccw_device_set_online(cdev);
488 return 0; 494 return 0;
489} 495}
490static void online_store_handle_online(struct ccw_device *cdev, int force) 496static int online_store_handle_online(struct ccw_device *cdev, int force)
491{ 497{
492 int ret; 498 int ret;
493 499
494 ret = online_store_recog_and_online(cdev); 500 ret = online_store_recog_and_online(cdev);
495 if (ret) 501 if (ret)
496 return; 502 return ret;
497 if (force && cdev->private->state == DEV_STATE_BOXED) { 503 if (force && cdev->private->state == DEV_STATE_BOXED) {
498 ret = ccw_device_stlck(cdev); 504 ret = ccw_device_stlck(cdev);
499 if (ret) { 505 if (ret)
500 dev_warn(&cdev->dev, 506 return ret;
501 "ccw_device_stlck returned %d!\n", ret);
502 return;
503 }
504 if (cdev->id.cu_type == 0) 507 if (cdev->id.cu_type == 0)
505 cdev->private->state = DEV_STATE_NOT_OPER; 508 cdev->private->state = DEV_STATE_NOT_OPER;
506 online_store_recog_and_online(cdev); 509 online_store_recog_and_online(cdev);
507 } 510 }
508 511 return 0;
509} 512}
510 513
511static ssize_t online_store (struct device *dev, struct device_attribute *attr, 514static ssize_t online_store (struct device *dev, struct device_attribute *attr,
@@ -538,8 +541,9 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
538 ret = count; 541 ret = count;
539 break; 542 break;
540 case 1: 543 case 1:
541 online_store_handle_online(cdev, force); 544 ret = online_store_handle_online(cdev, force);
542 ret = count; 545 if (!ret)
546 ret = count;
543 break; 547 break;
544 default: 548 default:
545 ret = -EINVAL; 549 ret = -EINVAL;
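
online_store_handle_online() now feeds its result back into the sysfs write: the store method returns the byte count only on success and a negative errno otherwise, so a failed activation is reported to the writing process instead of being silently swallowed. The same idiom in isolation, with my_activate() as a hypothetical helper:

static ssize_t my_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;

	ret = my_activate(dev);		/* hypothetical activation step */
	if (ret)
		return ret;		/* write() fails with -errno */
	return count;			/* success: consume the whole write */
}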
@@ -584,19 +588,14 @@ static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
584static DEVICE_ATTR(online, 0644, online_show, online_store); 588static DEVICE_ATTR(online, 0644, online_show, online_store);
585static DEVICE_ATTR(availability, 0444, available_show, NULL); 589static DEVICE_ATTR(availability, 0444, available_show, NULL);
586 590
587static struct attribute * subch_attrs[] = { 591static struct attribute *io_subchannel_attrs[] = {
588 &dev_attr_chpids.attr, 592 &dev_attr_chpids.attr,
589 &dev_attr_pimpampom.attr, 593 &dev_attr_pimpampom.attr,
590 NULL, 594 NULL,
591}; 595};
592 596
593static struct attribute_group subch_attr_group = { 597static struct attribute_group io_subchannel_attr_group = {
594 .attrs = subch_attrs, 598 .attrs = io_subchannel_attrs,
595};
596
597struct attribute_group *subch_attr_groups[] = {
598 &subch_attr_group,
599 NULL,
600}; 599};
601 600
602static struct attribute * ccwdev_attrs[] = { 601static struct attribute * ccwdev_attrs[] = {
@@ -790,7 +789,7 @@ static void sch_attach_device(struct subchannel *sch,
790 sch_set_cdev(sch, cdev); 789 sch_set_cdev(sch, cdev);
791 cdev->private->schid = sch->schid; 790 cdev->private->schid = sch->schid;
792 cdev->ccwlock = sch->lock; 791 cdev->ccwlock = sch->lock;
793 device_trigger_reprobe(sch); 792 ccw_device_trigger_reprobe(cdev);
794 spin_unlock_irq(sch->lock); 793 spin_unlock_irq(sch->lock);
795} 794}
796 795
@@ -1037,7 +1036,6 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1037 struct ccw_device_private *priv; 1036 struct ccw_device_private *priv;
1038 1037
1039 sch_set_cdev(sch, cdev); 1038 sch_set_cdev(sch, cdev);
1040 sch->driver = &io_subchannel_driver;
1041 cdev->ccwlock = sch->lock; 1039 cdev->ccwlock = sch->lock;
1042 1040
1043 /* Init private data. */ 1041 /* Init private data. */
@@ -1122,8 +1120,33 @@ static void io_subchannel_irq(struct subchannel *sch)
1122 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1120 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1123} 1121}
1124 1122
1125static int 1123static void io_subchannel_init_fields(struct subchannel *sch)
1126io_subchannel_probe (struct subchannel *sch) 1124{
1125 if (cio_is_console(sch->schid))
1126 sch->opm = 0xff;
1127 else
1128 sch->opm = chp_get_sch_opm(sch);
1129 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1130 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1131
1132 CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
1133 " - PIM = %02X, PAM = %02X, POM = %02X\n",
1134 sch->schib.pmcw.dev, sch->schid.ssid,
1135 sch->schid.sch_no, sch->schib.pmcw.pim,
1136 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1137 /* Initially set up some fields in the pmcw. */
1138 sch->schib.pmcw.ena = 0;
1139 sch->schib.pmcw.csense = 1; /* concurrent sense */
1140 if ((sch->lpm & (sch->lpm - 1)) != 0)
1141 sch->schib.pmcw.mp = 1; /* multipath mode */
1142 /* clean up possible residual cmf stuff */
1143 sch->schib.pmcw.mme = 0;
1144 sch->schib.pmcw.mbfc = 0;
1145 sch->schib.pmcw.mbi = 0;
1146 sch->schib.mba = 0;
1147}
1148
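
The (sch->lpm & (sch->lpm - 1)) test in io_subchannel_init_fields() is the usual clear-lowest-set-bit trick: the expression is non-zero exactly when more than one path bit is set in the logical path mask, which is when multipath mode is worth enabling. A standalone illustration:

/* lpm = 0x80 (one path):  0x80 & 0x7f == 0    -> single path
 * lpm = 0xc0 (two paths): 0xc0 & 0xbf == 0x80 -> multipath
 */
static int has_multiple_paths(u8 lpm)
{
	return (lpm & (lpm - 1)) != 0;
}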
1149static int io_subchannel_probe(struct subchannel *sch)
1127{ 1150{
1128 struct ccw_device *cdev; 1151 struct ccw_device *cdev;
1129 int rc; 1152 int rc;
@@ -1132,11 +1155,21 @@ io_subchannel_probe (struct subchannel *sch)
1132 1155
1133 cdev = sch_get_cdev(sch); 1156 cdev = sch_get_cdev(sch);
1134 if (cdev) { 1157 if (cdev) {
1158 rc = sysfs_create_group(&sch->dev.kobj,
1159 &io_subchannel_attr_group);
1160 if (rc)
1161 CIO_MSG_EVENT(0, "Failed to create io subchannel "
1162 "attributes for subchannel "
1163 "0.%x.%04x (rc=%d)\n",
1164 sch->schid.ssid, sch->schid.sch_no, rc);
1135 /* 1165 /*
1136 * This subchannel already has an associated ccw_device. 1166 * This subchannel already has an associated ccw_device.
1137 * Register it and exit. This happens for all early 1167 * Throw the delayed uevent for the subchannel, register
1138 * device, e.g. the console. 1168 * the ccw_device and exit. This happens for all early
1169 * devices, e.g. the console.
1139 */ 1170 */
1171 sch->dev.uevent_suppress = 0;
1172 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1140 cdev->dev.groups = ccwdev_attr_groups; 1173 cdev->dev.groups = ccwdev_attr_groups;
1141 device_initialize(&cdev->dev); 1174 device_initialize(&cdev->dev);
1142 ccw_device_register(cdev); 1175 ccw_device_register(cdev);
@@ -1152,17 +1185,24 @@ io_subchannel_probe (struct subchannel *sch)
1152 get_device(&cdev->dev); 1185 get_device(&cdev->dev);
1153 return 0; 1186 return 0;
1154 } 1187 }
1188 io_subchannel_init_fields(sch);
1155 /* 1189 /*
1156 * First check if a fitting device may be found amongst the 1190 * First check if a fitting device may be found amongst the
1157 * disconnected devices or in the orphanage. 1191 * disconnected devices or in the orphanage.
1158 */ 1192 */
1159 dev_id.devno = sch->schib.pmcw.dev; 1193 dev_id.devno = sch->schib.pmcw.dev;
1160 dev_id.ssid = sch->schid.ssid; 1194 dev_id.ssid = sch->schid.ssid;
1195 rc = sysfs_create_group(&sch->dev.kobj,
1196 &io_subchannel_attr_group);
1197 if (rc)
1198 return rc;
1161 /* Allocate I/O subchannel private data. */ 1199 /* Allocate I/O subchannel private data. */
1162 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1200 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1163 GFP_KERNEL | GFP_DMA); 1201 GFP_KERNEL | GFP_DMA);
1164 if (!sch->private) 1202 if (!sch->private) {
1165 return -ENOMEM; 1203 rc = -ENOMEM;
1204 goto out_err;
1205 }
1166 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1206 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1167 if (!cdev) 1207 if (!cdev)
1168 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1208 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1181,8 +1221,8 @@ io_subchannel_probe (struct subchannel *sch)
1181 } 1221 }
1182 cdev = io_subchannel_create_ccwdev(sch); 1222 cdev = io_subchannel_create_ccwdev(sch);
1183 if (IS_ERR(cdev)) { 1223 if (IS_ERR(cdev)) {
1184 kfree(sch->private); 1224 rc = PTR_ERR(cdev);
1185 return PTR_ERR(cdev); 1225 goto out_err;
1186 } 1226 }
1187 rc = io_subchannel_recog(cdev, sch); 1227 rc = io_subchannel_recog(cdev, sch);
1188 if (rc) { 1228 if (rc) {
@@ -1191,9 +1231,12 @@ io_subchannel_probe (struct subchannel *sch)
1191 spin_unlock_irqrestore(sch->lock, flags); 1231 spin_unlock_irqrestore(sch->lock, flags);
1192 if (cdev->dev.release) 1232 if (cdev->dev.release)
1193 cdev->dev.release(&cdev->dev); 1233 cdev->dev.release(&cdev->dev);
1194 kfree(sch->private); 1234 goto out_err;
1195 } 1235 }
1196 1236 return 0;
1237out_err:
1238 kfree(sch->private);
1239 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1197 return rc; 1240 return rc;
1198} 1241}
1199 1242
@@ -1214,6 +1257,7 @@ io_subchannel_remove (struct subchannel *sch)
1214 ccw_device_unregister(cdev); 1257 ccw_device_unregister(cdev);
1215 put_device(&cdev->dev); 1258 put_device(&cdev->dev);
1216 kfree(sch->private); 1259 kfree(sch->private);
1260 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1217 return 0; 1261 return 0;
1218} 1262}
1219 1263
@@ -1224,11 +1268,7 @@ static int io_subchannel_notify(struct subchannel *sch, int event)
1224 cdev = sch_get_cdev(sch); 1268 cdev = sch_get_cdev(sch);
1225 if (!cdev) 1269 if (!cdev)
1226 return 0; 1270 return 0;
1227 if (!cdev->drv) 1271 return ccw_device_notify(cdev, event);
1228 return 0;
1229 if (!cdev->online)
1230 return 0;
1231 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1232} 1272}
1233 1273
1234static void io_subchannel_verify(struct subchannel *sch) 1274static void io_subchannel_verify(struct subchannel *sch)
@@ -1240,22 +1280,96 @@ static void io_subchannel_verify(struct subchannel *sch)
1240 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1280 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1241} 1281}
1242 1282
1243static void io_subchannel_ioterm(struct subchannel *sch) 1283static int check_for_io_on_path(struct subchannel *sch, int mask)
1244{ 1284{
1245 struct ccw_device *cdev; 1285 int cc;
1246 1286
1247 cdev = sch_get_cdev(sch); 1287 cc = stsch(sch->schid, &sch->schib);
1248 if (!cdev) 1288 if (cc)
1249 return; 1289 return 0;
1250 /* Internal I/O will be retried by the interrupt handler. */ 1290 if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
1251 if (cdev->private->flags.intretry) 1291 return 1;
1292 return 0;
1293}
1294
1295static void terminate_internal_io(struct subchannel *sch,
1296 struct ccw_device *cdev)
1297{
1298 if (cio_clear(sch)) {
1299 /* Recheck device in case clear failed. */
1300 sch->lpm = 0;
1301 if (cdev->online)
1302 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1303 else
1304 css_schedule_eval(sch->schid);
1252 return; 1305 return;
1306 }
1253 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1307 cdev->private->state = DEV_STATE_CLEAR_VERIFY;
1308 /* Request retry of internal operation. */
1309 cdev->private->flags.intretry = 1;
1310 /* Call handler. */
1254 if (cdev->handler) 1311 if (cdev->handler)
1255 cdev->handler(cdev, cdev->private->intparm, 1312 cdev->handler(cdev, cdev->private->intparm,
1256 ERR_PTR(-EIO)); 1313 ERR_PTR(-EIO));
1257} 1314}
1258 1315
1316static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1317{
1318 struct ccw_device *cdev;
1319
1320 cdev = sch_get_cdev(sch);
1321 if (!cdev)
1322 return;
1323 if (check_for_io_on_path(sch, mask)) {
1324 if (cdev->private->state == DEV_STATE_ONLINE)
1325 ccw_device_kill_io(cdev);
1326 else {
1327 terminate_internal_io(sch, cdev);
1328 /* Re-start path verification. */
1329 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1330 }
1331 } else
1332 /* trigger path verification. */
1333 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1334
1335}
1336
1337static int io_subchannel_chp_event(struct subchannel *sch,
1338 struct chp_link *link, int event)
1339{
1340 int mask;
1341
1342 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1343 if (!mask)
1344 return 0;
1345 switch (event) {
1346 case CHP_VARY_OFF:
1347 sch->opm &= ~mask;
1348 sch->lpm &= ~mask;
1349 io_subchannel_terminate_path(sch, mask);
1350 break;
1351 case CHP_VARY_ON:
1352 sch->opm |= mask;
1353 sch->lpm |= mask;
1354 io_subchannel_verify(sch);
1355 break;
1356 case CHP_OFFLINE:
1357 if (stsch(sch->schid, &sch->schib))
1358 return -ENXIO;
1359 if (!css_sch_is_valid(&sch->schib))
1360 return -ENODEV;
1361 io_subchannel_terminate_path(sch, mask);
1362 break;
1363 case CHP_ONLINE:
1364 if (stsch(sch->schid, &sch->schib))
1365 return -ENXIO;
1366 sch->lpm |= mask & sch->opm;
1367 io_subchannel_verify(sch);
1368 break;
1369 }
1370 return 0;
1371}
1372
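
All four CHP events above work on an 8-bit path mask in which each bit position corresponds to one of the subchannel's channel paths; vary off clears the bits in opm/lpm and terminates I/O running on that path, vary on sets them and re-verifies. Assuming the usual cio convention that path position 0 maps to the leftmost bit, a mask for a single path looks like:

/* Sketch of the assumed mask convention; chp_ssd_get_mask() additionally
 * matches the chpid against the subchannel's ssd_info. */
static u8 path_mask_for_position(int pos)
{
	return 0x80 >> pos;	/* pos 0 -> 0x80, pos 1 -> 0x40, ... */
}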
1259static void 1373static void
1260io_subchannel_shutdown(struct subchannel *sch) 1374io_subchannel_shutdown(struct subchannel *sch)
1261{ 1375{
@@ -1285,6 +1399,195 @@ io_subchannel_shutdown(struct subchannel *sch)
1285 cio_disable_subchannel(sch); 1399 cio_disable_subchannel(sch);
1286} 1400}
1287 1401
1402static int io_subchannel_get_status(struct subchannel *sch)
1403{
1404 struct schib schib;
1405
1406 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
1407 return CIO_GONE;
1408 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
1409 return CIO_REVALIDATE;
1410 if (!sch->lpm)
1411 return CIO_NO_PATH;
1412 return CIO_OPER;
1413}
1414
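
io_subchannel_get_status() boils the hardware state down to four verdicts that drive the sch_event handling further down:

/* CIO_GONE:       stsch failed, or the device-number-valid bit is off
 * CIO_REVALIDATE: a different device number now answers on this subchannel
 * CIO_NO_PATH:    the subchannel is there, but no logical path is left
 * CIO_OPER:       device present with at least one usable path
 */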
1415static int device_is_disconnected(struct ccw_device *cdev)
1416{
1417 if (!cdev)
1418 return 0;
1419 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1420 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1421}
1422
1423static int recovery_check(struct device *dev, void *data)
1424{
1425 struct ccw_device *cdev = to_ccwdev(dev);
1426 int *redo = data;
1427
1428 spin_lock_irq(cdev->ccwlock);
1429 switch (cdev->private->state) {
1430 case DEV_STATE_DISCONNECTED:
1431 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1432 cdev->private->dev_id.ssid,
1433 cdev->private->dev_id.devno);
1434 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1435 *redo = 1;
1436 break;
1437 case DEV_STATE_DISCONNECTED_SENSE_ID:
1438 *redo = 1;
1439 break;
1440 }
1441 spin_unlock_irq(cdev->ccwlock);
1442
1443 return 0;
1444}
1445
1446static void recovery_work_func(struct work_struct *unused)
1447{
1448 int redo = 0;
1449
1450 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1451 if (redo) {
1452 spin_lock_irq(&recovery_lock);
1453 if (!timer_pending(&recovery_timer)) {
1454 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1455 recovery_phase++;
1456 mod_timer(&recovery_timer, jiffies +
1457 recovery_delay[recovery_phase] * HZ);
1458 }
1459 spin_unlock_irq(&recovery_lock);
1460 } else
1461 CIO_MSG_EVENT(4, "recovery: end\n");
1462}
1463
1464static DECLARE_WORK(recovery_work, recovery_work_func);
1465
1466static void recovery_func(unsigned long data)
1467{
1468 /*
1469 * We can't do our recovery in softirq context and it's not
1470 * performance critical, so we schedule it.
1471 */
1472 schedule_work(&recovery_work);
1473}
1474
1475static void ccw_device_schedule_recovery(void)
1476{
1477 unsigned long flags;
1478
1479 CIO_MSG_EVENT(4, "recovery: schedule\n");
1480 spin_lock_irqsave(&recovery_lock, flags);
1481 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1482 recovery_phase = 0;
1483 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1484 }
1485 spin_unlock_irqrestore(&recovery_lock, flags);
1486}
1487
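
Taken together, recovery_func(), recovery_work_func() and ccw_device_schedule_recovery() form an escalating retry loop: the first trigger arms the timer at phase 0, and every pass that still finds disconnected devices bumps recovery_phase until it sticks at the last table entry. The delay table itself is outside this hunk; assuming values along the following lines, the schedule is 3s, then 30s, then every 300s:

/* Assumed shape of the table defined elsewhere in device.c. */
static const unsigned long recovery_delay[] = { 3, 30, 300 };	/* seconds */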
1488static void device_set_disconnected(struct ccw_device *cdev)
1489{
1490 if (!cdev)
1491 return;
1492 ccw_device_set_timeout(cdev, 0);
1493 cdev->private->flags.fake_irb = 0;
1494 cdev->private->state = DEV_STATE_DISCONNECTED;
1495 if (cdev->online)
1496 ccw_device_schedule_recovery();
1497}
1498
1499static int io_subchannel_sch_event(struct subchannel *sch, int slow)
1500{
1501 int event, ret, disc;
1502 unsigned long flags;
1503 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
1504 struct ccw_device *cdev;
1505
1506 spin_lock_irqsave(sch->lock, flags);
1507 cdev = sch_get_cdev(sch);
1508 disc = device_is_disconnected(cdev);
1509 if (disc && slow) {
1510 /* Disconnected devices are only evaluated directly. */
1511 spin_unlock_irqrestore(sch->lock, flags);
1512 return 0;
1513 }
1514 /* No interrupt after machine check - kill pending timers. */
1515 if (cdev)
1516 ccw_device_set_timeout(cdev, 0);
1517 if (!disc && !slow) {
1518 /* Non-disconnected devices are evaluated on the slow path. */
1519 spin_unlock_irqrestore(sch->lock, flags);
1520 return -EAGAIN;
1521 }
1522 event = io_subchannel_get_status(sch);
1523 CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
1524 sch->schid.ssid, sch->schid.sch_no, event,
1525 disc ? "disconnected" : "normal",
1526 slow ? "slow" : "fast");
1527 /* Analyze subchannel status. */
1528 action = NONE;
1529 switch (event) {
1530 case CIO_NO_PATH:
1531 if (disc) {
1532 /* Check if paths have become available. */
1533 action = REPROBE;
1534 break;
1535 }
1536 /* fall through */
1537 case CIO_GONE:
1538 /* Prevent unwanted effects when opening lock. */
1539 cio_disable_subchannel(sch);
1540 device_set_disconnected(cdev);
1541 /* Ask driver what to do with device. */
1542 action = UNREGISTER;
1543 spin_unlock_irqrestore(sch->lock, flags);
1544 ret = io_subchannel_notify(sch, event);
1545 spin_lock_irqsave(sch->lock, flags);
1546 if (ret)
1547 action = NONE;
1548 break;
1549 case CIO_REVALIDATE:
1550 /* Device will be removed, so no notify necessary. */
1551 if (disc)
1552 /* Reprobe because immediate unregister might block. */
1553 action = REPROBE;
1554 else
1555 action = UNREGISTER_PROBE;
1556 break;
1557 case CIO_OPER:
1558 if (disc)
1559 /* Get device operational again. */
1560 action = REPROBE;
1561 break;
1562 }
1563 /* Perform action. */
1564 ret = 0;
1565 switch (action) {
1566 case UNREGISTER:
1567 case UNREGISTER_PROBE:
1568 /* Unregister device (will use subchannel lock). */
1569 spin_unlock_irqrestore(sch->lock, flags);
1570 css_sch_device_unregister(sch);
1571 spin_lock_irqsave(sch->lock, flags);
1572
1573 /* Reset intparm to zeroes. */
1574 sch->schib.pmcw.intparm = 0;
1575 cio_modify(sch);
1576 break;
1577 case REPROBE:
1578 ccw_device_trigger_reprobe(cdev);
1579 break;
1580 default:
1581 break;
1582 }
1583 spin_unlock_irqrestore(sch->lock, flags);
1584 /* Probe if necessary. */
1585 if (action == UNREGISTER_PROBE)
1586 ret = css_probe_device(sch->schid);
1587
1588 return ret;
1589}
1590
1288#ifdef CONFIG_CCW_CONSOLE 1591#ifdef CONFIG_CCW_CONSOLE
1289static struct ccw_device console_cdev; 1592static struct ccw_device console_cdev;
1290static struct ccw_device_private console_private; 1593static struct ccw_device_private console_private;
@@ -1297,14 +1600,16 @@ spinlock_t * cio_get_console_lock(void)
1297 return &ccw_console_lock; 1600 return &ccw_console_lock;
1298} 1601}
1299 1602
1300static int 1603static int ccw_device_console_enable(struct ccw_device *cdev,
1301ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1604 struct subchannel *sch)
1302{ 1605{
1303 int rc; 1606 int rc;
1304 1607
1305 /* Attach subchannel private data. */ 1608 /* Attach subchannel private data. */
1306 sch->private = cio_get_console_priv(); 1609 sch->private = cio_get_console_priv();
1307 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1610 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1611 io_subchannel_init_fields(sch);
1612 sch->driver = &io_subchannel_driver;
1308 /* Initialize the ccw_device structure. */ 1613 /* Initialize the ccw_device structure. */
1309 cdev->dev.parent= &sch->dev; 1614 cdev->dev.parent= &sch->dev;
1310 rc = io_subchannel_recog(cdev, sch); 1615 rc = io_subchannel_recog(cdev, sch);
@@ -1515,71 +1820,6 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1515 return sch->schid; 1820 return sch->schid;
1516} 1821}
1517 1822
1518static int recovery_check(struct device *dev, void *data)
1519{
1520 struct ccw_device *cdev = to_ccwdev(dev);
1521 int *redo = data;
1522
1523 spin_lock_irq(cdev->ccwlock);
1524 switch (cdev->private->state) {
1525 case DEV_STATE_DISCONNECTED:
1526 CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
1527 cdev->private->dev_id.ssid,
1528 cdev->private->dev_id.devno);
1529 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1530 *redo = 1;
1531 break;
1532 case DEV_STATE_DISCONNECTED_SENSE_ID:
1533 *redo = 1;
1534 break;
1535 }
1536 spin_unlock_irq(cdev->ccwlock);
1537
1538 return 0;
1539}
1540
1541static void recovery_work_func(struct work_struct *unused)
1542{
1543 int redo = 0;
1544
1545 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1546 if (redo) {
1547 spin_lock_irq(&recovery_lock);
1548 if (!timer_pending(&recovery_timer)) {
1549 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1550 recovery_phase++;
1551 mod_timer(&recovery_timer, jiffies +
1552 recovery_delay[recovery_phase] * HZ);
1553 }
1554 spin_unlock_irq(&recovery_lock);
1555 } else
1556 CIO_MSG_EVENT(4, "recovery: end\n");
1557}
1558
1559static DECLARE_WORK(recovery_work, recovery_work_func);
1560
1561static void recovery_func(unsigned long data)
1562{
1563 /*
1564 * We can't do our recovery in softirq context and it's not
1565 * performance critical, so we schedule it.
1566 */
1567 schedule_work(&recovery_work);
1568}
1569
1570void ccw_device_schedule_recovery(void)
1571{
1572 unsigned long flags;
1573
1574 CIO_MSG_EVENT(4, "recovery: schedule\n");
1575 spin_lock_irqsave(&recovery_lock, flags);
1576 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1577 recovery_phase = 0;
1578 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1579 }
1580 spin_unlock_irqrestore(&recovery_lock, flags);
1581}
1582
1583MODULE_LICENSE("GPL"); 1823MODULE_LICENSE("GPL");
1584EXPORT_SYMBOL(ccw_device_set_online); 1824EXPORT_SYMBOL(ccw_device_set_online);
1585EXPORT_SYMBOL(ccw_device_set_offline); 1825EXPORT_SYMBOL(ccw_device_set_offline);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index cb08092be39f..9800a8335a3f 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -88,8 +88,6 @@ int ccw_device_recognition(struct ccw_device *);
88int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
89int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
90 90
91void ccw_device_schedule_recovery(void);
92
93/* Function prototypes for device status and basic sense stuff. */ 91/* Function prototypes for device status and basic sense stuff. */
94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 92void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 93void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
@@ -118,6 +116,11 @@ int ccw_device_call_handler(struct ccw_device *);
118 116
119int ccw_device_stlck(struct ccw_device *); 117int ccw_device_stlck(struct ccw_device *);
120 118
119/* Helper function for machine check handling. */
120void ccw_device_trigger_reprobe(struct ccw_device *);
121void ccw_device_kill_io(struct ccw_device *);
122int ccw_device_notify(struct ccw_device *, int);
123
121/* qdio needs this. */ 124/* qdio needs this. */
122void ccw_device_set_timeout(struct ccw_device *, int); 125void ccw_device_set_timeout(struct ccw_device *, int);
123extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 126extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index e268d5a77c12..8b5fe57fb2f3 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -2,8 +2,7 @@
2 * drivers/s390/cio/device_fsm.c 2 * drivers/s390/cio/device_fsm.c
3 * finite state machine for device handling 3 * finite state machine for device handling
4 * 4 *
5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 5 * Copyright IBM Corp. 2002,2008
6 * IBM Corporation
7 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
8 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 */ 8 */
@@ -27,65 +26,6 @@
27 26
28static int timeout_log_enabled; 27static int timeout_log_enabled;
29 28
30int
31device_is_online(struct subchannel *sch)
32{
33 struct ccw_device *cdev;
34
35 cdev = sch_get_cdev(sch);
36 if (!cdev)
37 return 0;
38 return (cdev->private->state == DEV_STATE_ONLINE);
39}
40
41int
42device_is_disconnected(struct subchannel *sch)
43{
44 struct ccw_device *cdev;
45
46 cdev = sch_get_cdev(sch);
47 if (!cdev)
48 return 0;
49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
51}
52
53void
54device_set_disconnected(struct subchannel *sch)
55{
56 struct ccw_device *cdev;
57
58 cdev = sch_get_cdev(sch);
59 if (!cdev)
60 return;
61 ccw_device_set_timeout(cdev, 0);
62 cdev->private->flags.fake_irb = 0;
63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
66}
67
68void device_set_intretry(struct subchannel *sch)
69{
70 struct ccw_device *cdev;
71
72 cdev = sch_get_cdev(sch);
73 if (!cdev)
74 return;
75 cdev->private->flags.intretry = 1;
76}
77
78int device_trigger_verify(struct subchannel *sch)
79{
80 struct ccw_device *cdev;
81
82 cdev = sch_get_cdev(sch);
83 if (!cdev || !cdev->online)
84 return -EINVAL;
85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
86 return 0;
87}
88
89static int __init ccw_timeout_log_setup(char *unused) 29static int __init ccw_timeout_log_setup(char *unused)
90{ 30{
91 timeout_log_enabled = 1; 31 timeout_log_enabled = 1;
@@ -99,31 +39,43 @@ static void ccw_timeout_log(struct ccw_device *cdev)
99 struct schib schib; 39 struct schib schib;
100 struct subchannel *sch; 40 struct subchannel *sch;
101 struct io_subchannel_private *private; 41 struct io_subchannel_private *private;
42 union orb *orb;
102 int cc; 43 int cc;
103 44
104 sch = to_subchannel(cdev->dev.parent); 45 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch); 46 private = to_io_private(sch);
47 orb = &private->orb;
106 cc = stsch(sch->schid, &schib); 48 cc = stsch(sch->schid, &schib);
107 49
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, " 50 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock()); 51 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n"); 52 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 53 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0); 54 orb, sizeof(*orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id); 55 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id); 56 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, " 57 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm); 58 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117 59
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw || 60 if (orb->tm.b) {
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws) 61 printk(KERN_WARNING "cio: orb indicates transport mode\n");
120 printk(KERN_WARNING "cio: last channel program (intern):\n"); 62 printk(KERN_WARNING "cio: last tcw:\n");
121 else 63 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
122 printk(KERN_WARNING "cio: last channel program:\n"); 64 (void *)(addr_t)orb->tm.tcw,
123 65 sizeof(struct tcw), 0);
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 66 } else {
125 (void *)(addr_t)private->orb.cpa, 67 printk(KERN_WARNING "cio: orb indicates command mode\n");
126 sizeof(struct ccw1), 0); 68 if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
69 (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
70 printk(KERN_WARNING "cio: last channel program "
71 "(intern):\n");
72 else
73 printk(KERN_WARNING "cio: last channel program:\n");
74
75 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
76 (void *)(addr_t)orb->cmd.cpa,
77 sizeof(struct ccw1), 0);
78 }
127 printk(KERN_WARNING "cio: ccw device state: %d\n", 79 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state); 80 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc); 81 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
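
The dump logic keys off orb->tm.b, which presumes the ORB is now a union of a command-mode and a transport-mode layout. Roughly, and with the caveat that the real definitions live in the cio headers of this series:

/* Rough shape, for orientation only: */
union orb {
	struct cmd_orb cmd;	/* cpa, lpm, ... (command mode) */
	struct tm_orb tm;	/* b flag set, tcw address (transport mode) */
};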
@@ -171,18 +123,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires)
171 add_timer(&cdev->private->timer); 123 add_timer(&cdev->private->timer);
172} 124}
173 125
174/* Kill any pending timers after machine check. */
175void
176device_kill_pending_timer(struct subchannel *sch)
177{
178 struct ccw_device *cdev;
179
180 cdev = sch_get_cdev(sch);
181 if (!cdev)
182 return;
183 ccw_device_set_timeout(cdev, 0);
184}
185
186/* 126/*
187 * Cancel running i/o. This is called repeatedly since halt/clear are 127 * Cancel running i/o. This is called repeatedly since halt/clear are
188 * asynchronous operations. We do one try with cio_cancel, two tries 128 * asynchronous operations. We do one try with cio_cancel, two tries
@@ -205,15 +145,18 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
205 /* Not operational -> done. */ 145 /* Not operational -> done. */
206 return 0; 146 return 0;
207 /* Stage 1: cancel io. */ 147 /* Stage 1: cancel io. */
208 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && 148 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
209 !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 149 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
210 ret = cio_cancel(sch); 150 if (!scsw_is_tm(&sch->schib.scsw)) {
211 if (ret != -EINVAL) 151 ret = cio_cancel(sch);
212 return ret; 152 if (ret != -EINVAL)
213 /* cancel io unsuccessful. From now on it is asynchronous. */ 153 return ret;
154 }
155 /* cancel io unsuccessful or not applicable (transport mode).
156 * Continue with asynchronous instructions. */
214 cdev->private->iretry = 3; /* 3 halt retries. */ 157 cdev->private->iretry = 3; /* 3 halt retries. */
215 } 158 }
216 if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) { 159 if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
217 /* Stage 2: halt io. */ 160 /* Stage 2: halt io. */
218 if (cdev->private->iretry) { 161 if (cdev->private->iretry) {
219 cdev->private->iretry--; 162 cdev->private->iretry--;
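
This hunk shows the conversion pattern applied throughout the patch: direct field access such as sch->schib.scsw.actl becomes scsw_actl(&sch->schib.scsw), because the SCSW is now a union whose command-mode and transport-mode variants carry the control bits in different places, and the accessors added in the new scsw.c pick the right one. The idea, as a sketch rather than the real implementation:

static inline u32 my_scsw_actl(union scsw *scsw)
{
	/* scsw_is_tm() discriminates the union variants */
	return scsw_is_tm(scsw) ? scsw->tm.actl : scsw->cmd.actl;
}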
@@ -388,34 +331,30 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
388 } 331 }
389} 332}
390 333
334int ccw_device_notify(struct ccw_device *cdev, int event)
335{
336 if (!cdev->drv)
337 return 0;
338 if (!cdev->online)
339 return 0;
340 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
341}
342
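
ccw_device_notify() gives drivers one hook for all subchannel events: a non-zero return from ->notify keeps the (possibly disconnected) device, zero lets common I/O unregister it, as ccw_device_oper_notify() below demonstrates for CIO_OPER. A hypothetical driver callback:

static int my_notify(struct ccw_device *cdev, int event)
{
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		return 1;	/* keep the device across path loss */
	case CIO_OPER:
		return 1;	/* take the device back when it returns */
	default:
		return 0;	/* let common I/O decide */
	}
}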
391static void 343static void
392ccw_device_oper_notify(struct work_struct *work) 344ccw_device_oper_notify(struct work_struct *work)
393{ 345{
394 struct ccw_device_private *priv; 346 struct ccw_device_private *priv;
395 struct ccw_device *cdev; 347 struct ccw_device *cdev;
396 struct subchannel *sch;
397 int ret; 348 int ret;
398 unsigned long flags;
399 349
400 priv = container_of(work, struct ccw_device_private, kick_work); 350 priv = container_of(work, struct ccw_device_private, kick_work);
401 cdev = priv->cdev; 351 cdev = priv->cdev;
402 spin_lock_irqsave(cdev->ccwlock, flags); 352 ret = ccw_device_notify(cdev, CIO_OPER);
403 sch = to_subchannel(cdev->dev.parent);
404 if (sch->driver && sch->driver->notify) {
405 spin_unlock_irqrestore(cdev->ccwlock, flags);
406 ret = sch->driver->notify(sch, CIO_OPER);
407 spin_lock_irqsave(cdev->ccwlock, flags);
408 } else
409 ret = 0;
410 if (ret) { 353 if (ret) {
411 /* Reenable channel measurements, if needed. */ 354 /* Reenable channel measurements, if needed. */
412 spin_unlock_irqrestore(cdev->ccwlock, flags);
413 cmf_reenable(cdev); 355 cmf_reenable(cdev);
414 spin_lock_irqsave(cdev->ccwlock, flags);
415 wake_up(&cdev->private->wait_q); 356 wake_up(&cdev->private->wait_q);
416 } 357 } else
417 spin_unlock_irqrestore(cdev->ccwlock, flags);
418 if (!ret)
419 /* Driver doesn't want device back. */ 358 /* Driver doesn't want device back. */
420 ccw_device_do_unreg_rereg(work); 359 ccw_device_do_unreg_rereg(work);
421} 360}
@@ -621,10 +560,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
621 /* Deliver fake irb to device driver, if needed. */ 560 /* Deliver fake irb to device driver, if needed. */
622 if (cdev->private->flags.fake_irb) { 561 if (cdev->private->flags.fake_irb) {
623 memset(&cdev->private->irb, 0, sizeof(struct irb)); 562 memset(&cdev->private->irb, 0, sizeof(struct irb));
624 cdev->private->irb.scsw.cc = 1; 563 cdev->private->irb.scsw.cmd.cc = 1;
625 cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; 564 cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
626 cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; 565 cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
627 cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; 566 cdev->private->irb.scsw.cmd.stctl =
567 SCSW_STCTL_STATUS_PEND;
628 cdev->private->flags.fake_irb = 0; 568 cdev->private->flags.fake_irb = 0;
629 if (cdev->handler) 569 if (cdev->handler)
630 cdev->handler(cdev, cdev->private->intparm, 570 cdev->handler(cdev, cdev->private->intparm,
@@ -718,13 +658,10 @@ ccw_device_offline(struct ccw_device *cdev)
718 sch = to_subchannel(cdev->dev.parent); 658 sch = to_subchannel(cdev->dev.parent);
719 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) 659 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
720 return -ENODEV; 660 return -ENODEV;
721 if (cdev->private->state != DEV_STATE_ONLINE) { 661 if (scsw_actl(&sch->schib.scsw) != 0)
722 if (sch->schib.scsw.actl != 0)
723 return -EBUSY;
724 return -EINVAL;
725 }
726 if (sch->schib.scsw.actl != 0)
727 return -EBUSY; 662 return -EBUSY;
663 if (cdev->private->state != DEV_STATE_ONLINE)
664 return -EINVAL;
728 /* Are we doing path grouping? */ 665 /* Are we doing path grouping? */
729 if (!cdev->private->options.pgroup) { 666 if (!cdev->private->options.pgroup) {
730 /* No, set state offline immediately. */ 667 /* No, set state offline immediately. */
@@ -799,9 +736,9 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
799 */ 736 */
800 stsch(sch->schid, &sch->schib); 737 stsch(sch->schid, &sch->schib);
801 738
802 if (sch->schib.scsw.actl != 0 || 739 if (scsw_actl(&sch->schib.scsw) != 0 ||
803 (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || 740 (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
804 (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { 741 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
805 /* 742 /*
806 * No final status yet or final status not yet delivered 743 * No final status yet or final status not yet delivered
807 * to the device driver. Can't do path verification now, 744 * to the device driver. Can't do path verification now,
@@ -823,13 +760,13 @@ static void
823ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) 760ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
824{ 761{
825 struct irb *irb; 762 struct irb *irb;
763 int is_cmd;
826 764
827 irb = (struct irb *) __LC_IRB; 765 irb = (struct irb *) __LC_IRB;
766 is_cmd = !scsw_is_tm(&irb->scsw);
828 /* Check for unsolicited interrupt. */ 767 /* Check for unsolicited interrupt. */
829 if ((irb->scsw.stctl == 768 if (!scsw_is_solicited(&irb->scsw)) {
830 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) 769 if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
831 && (!irb->scsw.cc)) {
832 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
833 !irb->esw.esw0.erw.cons) { 770 !irb->esw.esw0.erw.cons) {
834 /* Unit check but no sense data. Need basic sense. */ 771 /* Unit check but no sense data. Need basic sense. */
835 if (ccw_device_do_sense(cdev, irb) != 0) 772 if (ccw_device_do_sense(cdev, irb) != 0)
@@ -848,7 +785,7 @@ call_handler_unsol:
848 } 785 }
849 /* Accumulate status and find out if a basic sense is needed. */ 786 /* Accumulate status and find out if a basic sense is needed. */
850 ccw_device_accumulate_irb(cdev, irb); 787 ccw_device_accumulate_irb(cdev, irb);
851 if (cdev->private->flags.dosense) { 788 if (is_cmd && cdev->private->flags.dosense) {
852 if (ccw_device_do_sense(cdev, irb) == 0) { 789 if (ccw_device_do_sense(cdev, irb) == 0) {
853 cdev->private->state = DEV_STATE_W4SENSE; 790 cdev->private->state = DEV_STATE_W4SENSE;
854 } 791 }
@@ -892,9 +829,9 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
892 829
893 irb = (struct irb *) __LC_IRB; 830 irb = (struct irb *) __LC_IRB;
894 /* Check for unsolicited interrupt. */ 831 /* Check for unsolicited interrupt. */
895 if (irb->scsw.stctl == 832 if (scsw_stctl(&irb->scsw) ==
896 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 833 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
897 if (irb->scsw.cc == 1) 834 if (scsw_cc(&irb->scsw) == 1)
898 /* Basic sense hasn't started. Try again. */ 835 /* Basic sense hasn't started. Try again. */
899 ccw_device_do_sense(cdev, irb); 836 ccw_device_do_sense(cdev, irb);
900 else { 837 else {
@@ -912,7 +849,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
912 * only deliver the halt/clear interrupt to the device driver as if it 849 * only deliver the halt/clear interrupt to the device driver as if it
913 * had killed the original request. 850 * had killed the original request.
914 */ 851 */
915 if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 852 if (scsw_fctl(&irb->scsw) &
853 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
916 /* Retry Basic Sense if requested. */ 854 /* Retry Basic Sense if requested. */
917 if (cdev->private->flags.intretry) { 855 if (cdev->private->flags.intretry) {
918 cdev->private->flags.intretry = 0; 856 cdev->private->flags.intretry = 0;
@@ -986,12 +924,10 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
986 ERR_PTR(-EIO)); 924 ERR_PTR(-EIO));
987} 925}
988 926
989void device_kill_io(struct subchannel *sch) 927void ccw_device_kill_io(struct ccw_device *cdev)
990{ 928{
991 int ret; 929 int ret;
992 struct ccw_device *cdev;
993 930
994 cdev = sch_get_cdev(sch);
995 ret = ccw_device_cancel_halt_clear(cdev); 931 ret = ccw_device_cancel_halt_clear(cdev);
996 if (ret == -EBUSY) { 932 if (ret == -EBUSY) {
997 ccw_device_set_timeout(cdev, 3*HZ); 933 ccw_device_set_timeout(cdev, 3*HZ);
@@ -1021,9 +957,9 @@ ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1021 case DEV_EVENT_INTERRUPT: 957 case DEV_EVENT_INTERRUPT:
1022 irb = (struct irb *) __LC_IRB; 958 irb = (struct irb *) __LC_IRB;
1023 /* Check for unsolicited interrupt. */ 959 /* Check for unsolicited interrupt. */
1024 if ((irb->scsw.stctl == 960 if ((scsw_stctl(&irb->scsw) ==
1025 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && 961 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1026 (!irb->scsw.cc)) 962 (!scsw_cc(&irb->scsw)))
1027 /* FIXME: we should restart stlck here, but this 963 /* FIXME: we should restart stlck here, but this
1028 * is extremely unlikely ... */ 964 * is extremely unlikely ... */
1029 goto out_wakeup; 965 goto out_wakeup;
@@ -1055,17 +991,14 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1055 ccw_device_sense_id_start(cdev); 991 ccw_device_sense_id_start(cdev);
1056} 992}
1057 993
1058void 994void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1059device_trigger_reprobe(struct subchannel *sch)
1060{ 995{
1061 struct ccw_device *cdev; 996 struct subchannel *sch;
1062 997
1063 cdev = sch_get_cdev(sch);
1064 if (!cdev)
1065 return;
1066 if (cdev->private->state != DEV_STATE_DISCONNECTED) 998 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1067 return; 999 return;
1068 1000
1001 sch = to_subchannel(cdev->dev.parent);
1069 /* Update some values. */ 1002 /* Update some values. */
1070 if (stsch(sch->schid, &sch->schib)) 1003 if (stsch(sch->schid, &sch->schib))
1071 return; 1004 return;
@@ -1081,7 +1014,6 @@ device_trigger_reprobe(struct subchannel *sch)
1081 sch->schib.pmcw.ena = 0; 1014 sch->schib.pmcw.ena = 0;
1082 if ((sch->lpm & (sch->lpm - 1)) != 0) 1015 if ((sch->lpm & (sch->lpm - 1)) != 0)
1083 sch->schib.pmcw.mp = 1; 1016 sch->schib.pmcw.mp = 1;
1084 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
1085 /* We should also update ssd info, but this has to wait. */ 1017 /* We should also update ssd info, but this has to wait. */
1086 /* Check if this is another device which appeared on the same sch. */ 1018 /* Check if this is another device which appeared on the same sch. */
1087 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1019 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index cba7020517ed..1bdaa614e34f 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -196,7 +196,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
196 irb = &cdev->private->irb; 196 irb = &cdev->private->irb;
197 197
198 /* Check the error cases. */ 198 /* Check the error cases. */
199 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
200 /* Retry Sense ID if requested. */ 200 /* Retry Sense ID if requested. */
201 if (cdev->private->flags.intretry) { 201 if (cdev->private->flags.intretry) {
202 cdev->private->flags.intretry = 0; 202 cdev->private->flags.intretry = 0;
@@ -234,10 +234,10 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
234 irb->ecw[6], irb->ecw[7]); 234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN; 235 return -EAGAIN;
236 } 236 }
237 if (irb->scsw.cc == 3) { 237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm; 238 u8 lpm;
239 239
240 lpm = to_io_private(sch)->orb.lpm; 240 lpm = to_io_private(sch)->orb.cmd.lpm;
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x " 242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is " 243 "on subchannel 0.%x.%04x is "
@@ -248,9 +248,9 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
248 } 248 }
249 249
250 /* Did we get a proper answer ? */ 250 /* Did we get a proper answer ? */
251 if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF && 251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) { 252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.count < sizeof(struct senseid) - 8) 253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1; 254 cdev->private->flags.esid = 1;
255 return 0; /* Success */ 255 return 0; /* Success */
256 } 256 }
@@ -260,7 +260,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
260 "subchannel 0.%x.%04x returns status %02X%02X\n", 260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid, 261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no, 262 sch->schid.sch_no,
263 irb->scsw.dstat, irb->scsw.cstat); 263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 264 return -EAGAIN;
265} 265}
266 266
@@ -277,9 +277,9 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
277 sch = to_subchannel(cdev->dev.parent); 277 sch = to_subchannel(cdev->dev.parent);
278 irb = (struct irb *) __LC_IRB; 278 irb = (struct irb *) __LC_IRB;
279 /* Retry sense id, if needed. */ 279 /* Retry sense id, if needed. */
280 if (irb->scsw.stctl == 280 if (irb->scsw.cmd.stctl ==
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
282 if ((irb->scsw.cc == 1) || !irb->scsw.actl) { 282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) {
283 ret = __ccw_device_sense_id_start(cdev); 283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY) 284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret); 285 ccw_device_sense_id_done(cdev, ret);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f308ad55a6d5..ee1a28310fbb 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -17,6 +17,7 @@
17#include <asm/ccwdev.h> 17#include <asm/ccwdev.h>
18#include <asm/idals.h> 18#include <asm/idals.h>
19#include <asm/chpid.h> 19#include <asm/chpid.h>
20#include <asm/fcx.h>
20 21
21#include "cio.h" 22#include "cio.h"
22#include "cio_debug.h" 23#include "cio_debug.h"
@@ -179,8 +180,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
179 return -EBUSY; 180 return -EBUSY;
180 } 181 }
181 if (cdev->private->state != DEV_STATE_ONLINE || 182 if (cdev->private->state != DEV_STATE_ONLINE ||
182 ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 183 ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
183 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || 184 !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
184 cdev->private->flags.doverify) 185 cdev->private->flags.doverify)
185 return -EBUSY; 186 return -EBUSY;
186 ret = cio_set_options (sch, flags); 187 ret = cio_set_options (sch, flags);
@@ -379,7 +380,7 @@ int ccw_device_resume(struct ccw_device *cdev)
379 if (cdev->private->state == DEV_STATE_NOT_OPER) 380 if (cdev->private->state == DEV_STATE_NOT_OPER)
380 return -ENODEV; 381 return -ENODEV;
381 if (cdev->private->state != DEV_STATE_ONLINE || 382 if (cdev->private->state != DEV_STATE_ONLINE ||
382 !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) 383 !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
383 return -EINVAL; 384 return -EINVAL;
384 return cio_resume(sch); 385 return cio_resume(sch);
385} 386}
@@ -404,7 +405,7 @@ ccw_device_call_handler(struct ccw_device *cdev)
404 * - fast notification was requested (primary status) 405 * - fast notification was requested (primary status)
405 * - unsolicited interrupts 406 * - unsolicited interrupts
406 */ 407 */
407 stctl = cdev->private->irb.scsw.stctl; 408 stctl = scsw_stctl(&cdev->private->irb.scsw);
408 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || 409 ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
409 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || 410 (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
410 (stctl == SCSW_STCTL_STATUS_PEND); 411 (stctl == SCSW_STCTL_STATUS_PEND);
@@ -528,14 +529,15 @@ ccw_device_stlck(struct ccw_device *cdev)
528 cio_disable_subchannel(sch); //FIXME: return code? 529 cio_disable_subchannel(sch); //FIXME: return code?
529 goto out_unlock; 530 goto out_unlock;
530 } 531 }
531 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; 532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
532 spin_unlock_irqrestore(sch->lock, flags); 533 spin_unlock_irqrestore(sch->lock, flags);
533 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); 534 wait_event(cdev->private->wait_q,
535 cdev->private->irb.scsw.cmd.actl == 0);
534 spin_lock_irqsave(sch->lock, flags); 536 spin_lock_irqsave(sch->lock, flags);
535 cio_disable_subchannel(sch); //FIXME: return code? 537 cio_disable_subchannel(sch); //FIXME: return code?
536 if ((cdev->private->irb.scsw.dstat != 538 if ((cdev->private->irb.scsw.cmd.dstat !=
537 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
538 (cdev->private->irb.scsw.cstat != 0)) 540 (cdev->private->irb.scsw.cmd.cstat != 0))
539 ret = -EIO; 541 ret = -EIO;
540 /* Clear irb. */ 542 /* Clear irb. */
541 memset(&cdev->private->irb, 0, sizeof(struct irb)); 543 memset(&cdev->private->irb, 0, sizeof(struct irb));
@@ -568,6 +570,122 @@ void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
568} 570}
569EXPORT_SYMBOL(ccw_device_get_id); 571EXPORT_SYMBOL(ccw_device_get_id);
570 572
573/**
574 * ccw_device_tm_start_key - perform start function
575 * @cdev: ccw device on which to perform the start function
576 * @tcw: transport-command word to be started
577 * @intparm: user defined parameter to be passed to the interrupt handler
578 * @lpm: mask of paths to use
579 * @key: storage key to use for storage access
580 *
581 * Start the tcw on the given ccw device. Return zero on success, non-zero
582 * otherwise.
583 */
584int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
585 unsigned long intparm, u8 lpm, u8 key)
586{
587 struct subchannel *sch;
588 int rc;
589
590 sch = to_subchannel(cdev->dev.parent);
591 if (cdev->private->state != DEV_STATE_ONLINE)
592 return -EIO;
593 /* Adjust requested path mask to exclude varied-off paths. */
594 if (lpm) {
595 lpm &= sch->opm;
596 if (lpm == 0)
597 return -EACCES;
598 }
599 rc = cio_tm_start_key(sch, tcw, lpm, key);
600 if (rc == 0)
601 cdev->private->intparm = intparm;
602 return rc;
603}
604EXPORT_SYMBOL(ccw_device_tm_start_key);
605
606/**
607 * ccw_device_tm_start_timeout_key - perform start function with timeout
608 * @cdev: ccw device on which to perform the start function
609 * @tcw: transport-command word to be started
610 * @intparm: user defined parameter to be passed to the interrupt handler
611 * @lpm: mask of paths to use
612 * @key: storage key to use for storage access
613 * @expires: time span in jiffies after which to abort request
614 *
615 * Start the tcw on the given ccw device. Return zero on success, non-zero
616 * otherwise.
617 */
618int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
619 unsigned long intparm, u8 lpm, u8 key,
620 int expires)
621{
622 int ret;
623
624 ccw_device_set_timeout(cdev, expires);
625 ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
626 if (ret != 0)
627 ccw_device_set_timeout(cdev, 0);
628 return ret;
629}
630EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
631
632/**
633 * ccw_device_tm_start - perform start function
634 * @cdev: ccw device on which to perform the start function
635 * @tcw: transport-command word to be started
636 * @intparm: user defined parameter to be passed to the interrupt handler
637 * @lpm: mask of paths to use
638 *
639 * Start the tcw on the given ccw device. Return zero on success, non-zero
640 * otherwise.
641 */
642int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
643 unsigned long intparm, u8 lpm)
644{
645 return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
646 PAGE_DEFAULT_KEY);
647}
648EXPORT_SYMBOL(ccw_device_tm_start);
649
650/**
651 * ccw_device_tm_start_timeout - perform start function with timeout
652 * @cdev: ccw device on which to perform the start function
653 * @tcw: transport-command word to be started
654 * @intparm: user defined parameter to be passed to the interrupt handler
655 * @lpm: mask of paths to use
656 * @expires: time span in jiffies after which to abort request
657 *
658 * Start the tcw on the given ccw device. Return zero on success, non-zero
659 * otherwise.
660 */
661int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
662 unsigned long intparm, u8 lpm, int expires)
663{
664 return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
665 PAGE_DEFAULT_KEY, expires);
666}
667EXPORT_SYMBOL(ccw_device_tm_start_timeout);
668
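
A hedged usage sketch for the new transport-mode start interface; the tcw is assumed to be fully built already (normally via the fcx/itcw helpers added elsewhere in this series), and the path mask and timeout are illustrative only:

static int my_start_request(struct ccw_device *cdev, struct tcw *tcw)
{
	/* 10 second guard timer, cleared again if the start fails;
	 * lpm 0x80 restricts the request to the first channel path;
	 * -EACCES means every requested path is varied offline. */
	return ccw_device_tm_start_timeout(cdev, tcw, (unsigned long)tcw,
					   0x80, 10 * HZ);
}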
669/**
670 * ccw_device_tm_intrg - perform interrogate function
671 * @cdev: ccw device on which to perform the interrogate function
672 *
673 * Perform an interrogate function on the given ccw device. Return zero on
674 * success, non-zero otherwise.
675 */
676int ccw_device_tm_intrg(struct ccw_device *cdev)
677{
678 struct subchannel *sch = to_subchannel(cdev->dev.parent);
679
680 if (cdev->private->state != DEV_STATE_ONLINE)
681 return -EIO;
682 if (!scsw_is_tm(&sch->schib.scsw) ||
 683 !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
684 return -EINVAL;
685 return cio_tm_intrg(sch);
686}
687EXPORT_SYMBOL(ccw_device_tm_intrg);
688
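
Interrogate is the transport-mode way to ask the channel subsystem for intermediate status of a running request without cancelling it, so it only makes sense while a transport-mode start function is pending, which is what the guard above enforces. A hypothetical poll helper:

static void my_poll_request(struct ccw_device *cdev)
{
	if (ccw_device_tm_intrg(cdev))
		return;	/* not online, or no TM start pending */
	/* any resulting status is delivered via the interrupt handler */
}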
571// FIXME: these have to go: 689// FIXME: these have to go:
572 690
573int 691int
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 5cf7be008e98..86bc94eb607f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -28,13 +28,13 @@
28 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
29 * operation should be tried again. 29 * operation should be tried again.
30 */ 30 */
31static int __ccw_device_should_retry(struct scsw *scsw) 31static int __ccw_device_should_retry(union scsw *scsw)
32{ 32{
33 /* CC is only valid if start function bit is set. */ 33 /* CC is only valid if start function bit is set. */
34 if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) 34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1)
35 return 1; 35 return 1;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 36 /* No more activity. For sense and set PGID we stubbornly try again. */
37 if (!scsw->actl) 37 if (!scsw->cmd.actl)
38 return 1; 38 return 1;
39 return 0; 39 return 0;
40} 40}
@@ -125,7 +125,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
125 125
126 sch = to_subchannel(cdev->dev.parent); 126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb; 127 irb = &cdev->private->irb;
128 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */ 129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) { 130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0; 131 cdev->private->flags.intretry = 0;
@@ -155,10 +155,10 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 irb->ecw[6], irb->ecw[7]); 155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN; 156 return -EAGAIN;
157 } 157 }
158 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm; 159 u8 lpm;
160 160
161 lpm = to_io_private(sch)->orb.lpm; 161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -188,7 +188,7 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
188 188
189 irb = (struct irb *) __LC_IRB; 189 irb = (struct irb *) __LC_IRB;
190 190
191 if (irb->scsw.stctl == 191 if (irb->scsw.cmd.stctl ==
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
193 if (__ccw_device_should_retry(&irb->scsw)) { 193 if (__ccw_device_should_retry(&irb->scsw)) {
194 ret = __ccw_device_sense_pgid_start(cdev); 194 ret = __ccw_device_sense_pgid_start(cdev);
@@ -331,7 +331,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
331 331
332 sch = to_subchannel(cdev->dev.parent); 332 sch = to_subchannel(cdev->dev.parent);
333 irb = &cdev->private->irb; 333 irb = &cdev->private->irb;
334 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
335 /* Retry Set PGID if requested. */ 335 /* Retry Set PGID if requested. */
336 if (cdev->private->flags.intretry) { 336 if (cdev->private->flags.intretry) {
337 cdev->private->flags.intretry = 0; 337 cdev->private->flags.intretry = 0;
@@ -355,7 +355,7 @@ __ccw_device_check_pgid(struct ccw_device *cdev)
355 irb->ecw[6], irb->ecw[7]); 355 irb->ecw[6], irb->ecw[7]);
356 return -EAGAIN; 356 return -EAGAIN;
357 } 357 }
358 if (irb->scsw.cc == 3) { 358 if (irb->scsw.cmd.cc == 3) {
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x,"
360 " lpm %02X, became 'not operational'\n", 360 " lpm %02X, became 'not operational'\n",
361 cdev->private->dev_id.devno, sch->schid.ssid, 361 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -376,7 +376,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
376 376
377 sch = to_subchannel(cdev->dev.parent); 377 sch = to_subchannel(cdev->dev.parent);
378 irb = &cdev->private->irb; 378 irb = &cdev->private->irb;
379 if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
380 /* Retry NOP if requested. */ 380 /* Retry NOP if requested. */
381 if (cdev->private->flags.intretry) { 381 if (cdev->private->flags.intretry) {
382 cdev->private->flags.intretry = 0; 382 cdev->private->flags.intretry = 0;
@@ -384,7 +384,7 @@ static int __ccw_device_check_nop(struct ccw_device *cdev)
384 } 384 }
385 return -ETIME; 385 return -ETIME;
386 } 386 }
387 if (irb->scsw.cc == 3) { 387 if (irb->scsw.cmd.cc == 3) {
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x,"
389 " lpm %02X, became 'not operational'\n", 389 " lpm %02X, became 'not operational'\n",
390 cdev->private->dev_id.devno, sch->schid.ssid, 390 cdev->private->dev_id.devno, sch->schid.ssid,
@@ -438,7 +438,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
438 438
439 irb = (struct irb *) __LC_IRB; 439 irb = (struct irb *) __LC_IRB;
440 440
441 if (irb->scsw.stctl == 441 if (irb->scsw.cmd.stctl ==
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
443 if (__ccw_device_should_retry(&irb->scsw)) 443 if (__ccw_device_should_retry(&irb->scsw))
444 __ccw_device_verify_start(cdev); 444 __ccw_device_verify_start(cdev);
@@ -544,7 +544,7 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
544 544
545 irb = (struct irb *) __LC_IRB; 545 irb = (struct irb *) __LC_IRB;
546 546
547 if (irb->scsw.stctl == 547 if (irb->scsw.cmd.stctl ==
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
549 if (__ccw_device_should_retry(&irb->scsw)) 549 if (__ccw_device_should_retry(&irb->scsw))
550 __ccw_device_disband_start(cdev); 550 __ccw_device_disband_start(cdev);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 4a38993000f2..1b03c5423be2 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -29,9 +29,11 @@
29static void 29static void
30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) 30ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
31{ 31{
32 if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 32 char dbf_text[15];
33 SCHN_STAT_CHN_CTRL_CHK | 33
34 SCHN_STAT_INTF_CTRL_CHK))) 34 if (!scsw_is_valid_cstat(&irb->scsw) ||
35 !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
36 SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
35 return; 37 return;
36 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " 38 CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
37 "received" 39 "received"
@@ -39,15 +41,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
39 ": %02X sch_stat : %02X\n", 41 ": %02X sch_stat : %02X\n",
40 cdev->private->dev_id.devno, cdev->private->schid.ssid, 42 cdev->private->dev_id.devno, cdev->private->schid.ssid,
41 cdev->private->schid.sch_no, 43 cdev->private->schid.sch_no,
42 irb->scsw.dstat, irb->scsw.cstat); 44 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
43 45 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
44 if (irb->scsw.cc != 3) { 46 CIO_TRACE_EVENT(0, dbf_text);
45 char dbf_text[15]; 47 CIO_HEX_EVENT(0, irb, sizeof(struct irb));
46
47 sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
48 CIO_TRACE_EVENT(0, dbf_text);
49 CIO_HEX_EVENT(0, irb, sizeof (struct irb));
50 }
51} 48}
52 49
53/* 50/*
@@ -81,12 +78,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
 81 * are conditions that have to be met for the extended control 78 * are conditions that have to be met for the extended control
82 * bit to have meaning. Sick. 79 * bit to have meaning. Sick.
83 */ 80 */
84 cdev->private->irb.scsw.ectl = 0; 81 cdev->private->irb.scsw.cmd.ectl = 0;
85 if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && 82 if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
86 !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) 83 !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
87 cdev->private->irb.scsw.ectl = irb->scsw.ectl; 84 cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
88 /* Check if extended control word is valid. */ 85 /* Check if extended control word is valid. */
89 if (!cdev->private->irb.scsw.ectl) 86 if (!cdev->private->irb.scsw.cmd.ectl)
90 return; 87 return;
91 /* Copy concurrent sense / model dependent information. */ 88 /* Copy concurrent sense / model dependent information. */
92 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); 89 memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
@@ -98,11 +95,12 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
98static int 95static int
99ccw_device_accumulate_esw_valid(struct irb *irb) 96ccw_device_accumulate_esw_valid(struct irb *irb)
100{ 97{
101 if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) 98 if (!irb->scsw.cmd.eswf &&
99 (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
102 return 0; 100 return 0;
103 if (irb->scsw.stctl == 101 if (irb->scsw.cmd.stctl ==
104 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && 102 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
105 !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 103 !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
106 return 0; 104 return 0;
107 return 1; 105 return 1;
108} 106}
@@ -125,7 +123,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
125 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; 123 cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
126 124
127 /* Copy subchannel logout information if esw is of format 0. */ 125 /* Copy subchannel logout information if esw is of format 0. */
128 if (irb->scsw.eswf) { 126 if (irb->scsw.cmd.eswf) {
129 cdev_sublog = &cdev_irb->esw.esw0.sublog; 127 cdev_sublog = &cdev_irb->esw.esw0.sublog;
130 sublog = &irb->esw.esw0.sublog; 128 sublog = &irb->esw.esw0.sublog;
131 /* Copy extended status flags. */ 129 /* Copy extended status flags. */
@@ -134,7 +132,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
134 * Copy fields that have a meaning for channel data check 132 * Copy fields that have a meaning for channel data check
135 * channel control check and interface control check. 133 * channel control check and interface control check.
136 */ 134 */
137 if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | 135 if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
138 SCHN_STAT_CHN_CTRL_CHK | 136 SCHN_STAT_CHN_CTRL_CHK |
139 SCHN_STAT_INTF_CTRL_CHK)) { 137 SCHN_STAT_INTF_CTRL_CHK)) {
140 /* Copy ancillary report bit. */ 138 /* Copy ancillary report bit. */
@@ -155,7 +153,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
155 /* Copy i/o-error alert. */ 153 /* Copy i/o-error alert. */
156 cdev_sublog->ioerr = sublog->ioerr; 154 cdev_sublog->ioerr = sublog->ioerr;
157 /* Copy channel path timeout bit. */ 155 /* Copy channel path timeout bit. */
158 if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) 156 if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
159 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; 157 cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
160 /* Copy failing storage address validity flag. */ 158 /* Copy failing storage address validity flag. */
161 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; 159 cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
@@ -200,24 +198,24 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
 200 * If not, the remaining bits have no meaning and we must ignore them. 198 * If not, the remaining bits have no meaning and we must ignore them.
 201 * The esw is not meaningful either. 199 * The esw is not meaningful either.
202 */ 200 */
203 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 201 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
204 return; 202 return;
205 203
206 /* Check for channel checks and interface control checks. */ 204 /* Check for channel checks and interface control checks. */
207 ccw_device_msg_control_check(cdev, irb); 205 ccw_device_msg_control_check(cdev, irb);
208 206
209 /* Check for path not operational. */ 207 /* Check for path not operational. */
210 if (irb->scsw.pno && irb->scsw.fctl != 0 && 208 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
211 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
212 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
213 ccw_device_path_notoper(cdev); 209 ccw_device_path_notoper(cdev);
214 210 /* No irb accumulation for transport mode irbs. */
211 if (scsw_is_tm(&irb->scsw)) {
212 memcpy(&cdev->private->irb, irb, sizeof(struct irb));
213 return;
214 }
215 /* 215 /*
216 * Don't accumulate unsolicited interrupts. 216 * Don't accumulate unsolicited interrupts.
217 */ 217 */
218 if ((irb->scsw.stctl == 218 if (!scsw_is_solicited(&irb->scsw))
219 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
220 (!irb->scsw.cc))
221 return; 219 return;
222 220
223 cdev_irb = &cdev->private->irb; 221 cdev_irb = &cdev->private->irb;
@@ -227,62 +225,63 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
227 * status at the subchannel has been cleared and we must not pass 225 * status at the subchannel has been cleared and we must not pass
228 * intermediate accumulated status to the device driver. 226 * intermediate accumulated status to the device driver.
229 */ 227 */
230 if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) 228 if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
231 memset(&cdev->private->irb, 0, sizeof(struct irb)); 229 memset(&cdev->private->irb, 0, sizeof(struct irb));
232 230
233 /* Copy bits which are valid only for the start function. */ 231 /* Copy bits which are valid only for the start function. */
234 if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { 232 if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
235 /* Copy key. */ 233 /* Copy key. */
236 cdev_irb->scsw.key = irb->scsw.key; 234 cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
237 /* Copy suspend control bit. */ 235 /* Copy suspend control bit. */
238 cdev_irb->scsw.sctl = irb->scsw.sctl; 236 cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
239 /* Accumulate deferred condition code. */ 237 /* Accumulate deferred condition code. */
240 cdev_irb->scsw.cc |= irb->scsw.cc; 238 cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
241 /* Copy ccw format bit. */ 239 /* Copy ccw format bit. */
242 cdev_irb->scsw.fmt = irb->scsw.fmt; 240 cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
243 /* Copy prefetch bit. */ 241 /* Copy prefetch bit. */
244 cdev_irb->scsw.pfch = irb->scsw.pfch; 242 cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
245 /* Copy initial-status-interruption-control. */ 243 /* Copy initial-status-interruption-control. */
246 cdev_irb->scsw.isic = irb->scsw.isic; 244 cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
247 /* Copy address limit checking control. */ 245 /* Copy address limit checking control. */
248 cdev_irb->scsw.alcc = irb->scsw.alcc; 246 cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
249 /* Copy suppress suspend bit. */ 247 /* Copy suppress suspend bit. */
250 cdev_irb->scsw.ssi = irb->scsw.ssi; 248 cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
251 } 249 }
252 250
253 /* Take care of the extended control bit and extended control word. */ 251 /* Take care of the extended control bit and extended control word. */
254 ccw_device_accumulate_ecw(cdev, irb); 252 ccw_device_accumulate_ecw(cdev, irb);
255 253
256 /* Accumulate function control. */ 254 /* Accumulate function control. */
257 cdev_irb->scsw.fctl |= irb->scsw.fctl; 255 cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
258 /* Copy activity control. */ 256 /* Copy activity control. */
259 cdev_irb->scsw.actl= irb->scsw.actl; 257 cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
260 /* Accumulate status control. */ 258 /* Accumulate status control. */
261 cdev_irb->scsw.stctl |= irb->scsw.stctl; 259 cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
262 /* 260 /*
263 * Copy ccw address if it is valid. This is a bit simplified 261 * Copy ccw address if it is valid. This is a bit simplified
264 * but should be close enough for all practical purposes. 262 * but should be close enough for all practical purposes.
265 */ 263 */
266 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || 264 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
267 ((irb->scsw.stctl == 265 ((irb->scsw.cmd.stctl ==
268 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && 266 (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
269 (irb->scsw.actl & SCSW_ACTL_DEVACT) && 267 (irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
270 (irb->scsw.actl & SCSW_ACTL_SCHACT)) || 268 (irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
271 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) 269 (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
272 cdev_irb->scsw.cpa = irb->scsw.cpa; 270 cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
273 /* Accumulate device status, but not the device busy flag. */ 271 /* Accumulate device status, but not the device busy flag. */
274 cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; 272 cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
275 /* dstat is not always valid. */ 273 /* dstat is not always valid. */
276 if (irb->scsw.stctl & 274 if (irb->scsw.cmd.stctl &
277 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS 275 (SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
278 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS)) 276 | SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
279 cdev_irb->scsw.dstat |= irb->scsw.dstat; 277 cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
280 /* Accumulate subchannel status. */ 278 /* Accumulate subchannel status. */
281 cdev_irb->scsw.cstat |= irb->scsw.cstat; 279 cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
282 /* Copy residual count if it is valid. */ 280 /* Copy residual count if it is valid. */
283 if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && 281 if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
284 (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) 282 (irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
285 cdev_irb->scsw.count = irb->scsw.count; 283 == 0)
284 cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
286 285
287 /* Take care of bits in the extended status word. */ 286 /* Take care of bits in the extended status word. */
288 ccw_device_accumulate_esw(cdev, irb); 287 ccw_device_accumulate_esw(cdev, irb);
@@ -299,7 +298,7 @@ ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
299 * sense facility available/supported when enabling the 298 * sense facility available/supported when enabling the
300 * concurrent sense facility. 299 * concurrent sense facility.
301 */ 300 */
302 if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 301 if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
303 !(cdev_irb->esw.esw0.erw.cons)) 302 !(cdev_irb->esw.esw0.erw.cons))
304 cdev->private->flags.dosense = 1; 303 cdev->private->flags.dosense = 1;
305} 304}
@@ -317,7 +316,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
317 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
318 317
319 /* A sense is required, can we do it now ? */ 318 /* A sense is required, can we do it now ? */
320 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 319 if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
321 /* 320 /*
 322 * we received a Unit Check but we have no final 321 * we received a Unit Check but we have no final
323 * status yet, therefore we must delay the SENSE 322 * status yet, therefore we must delay the SENSE
@@ -355,20 +354,18 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
 355 * If not, the remaining bits have no meaning and we must ignore them. 354 * If not, the remaining bits have no meaning and we must ignore them.
 356 * The esw is not meaningful either. 355 * The esw is not meaningful either.
357 */ 356 */
358 if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) 357 if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
359 return; 358 return;
360 359
361 /* Check for channel checks and interface control checks. */ 360 /* Check for channel checks and interface control checks. */
362 ccw_device_msg_control_check(cdev, irb); 361 ccw_device_msg_control_check(cdev, irb);
363 362
364 /* Check for path not operational. */ 363 /* Check for path not operational. */
365 if (irb->scsw.pno && irb->scsw.fctl != 0 && 364 if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
366 (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) ||
367 (irb->scsw.actl & SCSW_ACTL_SUSPENDED)))
368 ccw_device_path_notoper(cdev); 365 ccw_device_path_notoper(cdev);
369 366
370 if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && 367 if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
371 (irb->scsw.dstat & DEV_STAT_CHN_END)) { 368 (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
372 cdev->private->irb.esw.esw0.erw.cons = 1; 369 cdev->private->irb.esw.esw0.erw.cons = 1;
373 cdev->private->flags.dosense = 0; 370 cdev->private->flags.dosense = 0;
374 } 371 }
@@ -386,11 +383,11 @@ int
386ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) 383ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
387{ 384{
388 ccw_device_accumulate_irb(cdev, irb); 385 ccw_device_accumulate_irb(cdev, irb);
389 if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) 386 if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
390 return -EBUSY; 387 return -EBUSY;
391 /* Check for basic sense. */ 388 /* Check for basic sense. */
392 if (cdev->private->flags.dosense && 389 if (cdev->private->flags.dosense &&
393 !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { 390 !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
394 cdev->private->irb.esw.esw0.erw.cons = 1; 391 cdev->private->irb.esw.esw0.erw.cons = 1;
395 cdev->private->flags.dosense = 0; 392 cdev->private->flags.dosense = 0;
396 return 0; 393 return 0;
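
Both path-not-operational checks in this file trade a three-part open-coded
condition for scsw_is_valid_pno() plus scsw_pno(). The helper itself is not
part of this excerpt; judging from the condition it replaces, its command-mode
variant can be expected to look roughly like this sketch:

	/* Presumed shape, mirroring the removed open-coded test: */
	int scsw_cmd_is_valid_pno(union scsw *scsw)
	{
		return (scsw->cmd.fctl != 0) &&
		       (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
			(scsw->cmd.actl & SCSW_ACTL_SUSPENDED));
	}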
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
new file mode 100644
index 000000000000..61677dfbdc9b
--- /dev/null
+++ b/drivers/s390/cio/fcx.c
@@ -0,0 +1,350 @@
1/*
2 * Functions for assembling fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include "cio.h"
16
17/**
18 * tcw_get_intrg - return pointer to associated interrogate tcw
19 * @tcw: pointer to the original tcw
20 *
21 * Return a pointer to the interrogate tcw associated with the specified tcw
22 * or %NULL if there is no associated interrogate tcw.
23 */
24struct tcw *tcw_get_intrg(struct tcw *tcw)
25{
26 return (struct tcw *) ((addr_t) tcw->intrg);
27}
28EXPORT_SYMBOL(tcw_get_intrg);
29
30/**
31 * tcw_get_data - return pointer to input/output data associated with tcw
32 * @tcw: pointer to the tcw
33 *
34 * Return the input or output data address specified in the tcw depending
35 * on whether the r-bit or the w-bit is set. If neither bit is set, return
36 * %NULL.
37 */
38void *tcw_get_data(struct tcw *tcw)
39{
40 if (tcw->r)
41 return (void *) ((addr_t) tcw->input);
42 if (tcw->w)
43 return (void *) ((addr_t) tcw->output);
44 return NULL;
45}
46EXPORT_SYMBOL(tcw_get_data);
47
48/**
49 * tcw_get_tccb - return pointer to tccb associated with tcw
50 * @tcw: pointer to the tcw
51 *
52 * Return pointer to the tccb associated with this tcw.
53 */
54struct tccb *tcw_get_tccb(struct tcw *tcw)
55{
56 return (struct tccb *) ((addr_t) tcw->tccb);
57}
58EXPORT_SYMBOL(tcw_get_tccb);
59
60/**
61 * tcw_get_tsb - return pointer to tsb associated with tcw
62 * @tcw: pointer to the tcw
63 *
64 * Return pointer to the tsb associated with this tcw.
65 */
66struct tsb *tcw_get_tsb(struct tcw *tcw)
67{
68 return (struct tsb *) ((addr_t) tcw->tsb);
69}
70EXPORT_SYMBOL(tcw_get_tsb);
71
72/**
73 * tcw_init - initialize tcw data structure
74 * @tcw: pointer to the tcw to be initialized
75 * @r: initial value of the r-bit
76 * @w: initial value of the w-bit
77 *
78 * Initialize all fields of the specified tcw data structure with zero and
79 * fill in the format, flags, r and w fields.
80 */
81void tcw_init(struct tcw *tcw, int r, int w)
82{
83 memset(tcw, 0, sizeof(struct tcw));
84 tcw->format = TCW_FORMAT_DEFAULT;
85 tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
86 if (r)
87 tcw->r = 1;
88 if (w)
89 tcw->w = 1;
90}
91EXPORT_SYMBOL(tcw_init);
92
93static inline size_t tca_size(struct tccb *tccb)
94{
95 return tccb->tcah.tcal - 12;
96}
97
98static u32 calc_dcw_count(struct tccb *tccb)
99{
100 int offset;
101 struct dcw *dcw;
102 u32 count = 0;
103 size_t size;
104
105 size = tca_size(tccb);
106 for (offset = 0; offset < size;) {
107 dcw = (struct dcw *) &tccb->tca[offset];
108 count += dcw->count;
109 if (!(dcw->flags & DCW_FLAGS_CC))
110 break;
111 offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
112 }
113 return count;
114}
115
116static u32 calc_cbc_size(struct tidaw *tidaw, int num)
117{
118 int i;
119 u32 cbc_data;
120 u32 cbc_count = 0;
121 u64 data_count = 0;
122
123 for (i = 0; i < num; i++) {
124 if (tidaw[i].flags & TIDAW_FLAGS_LAST)
125 break;
126 /* TODO: find out if padding applies to total of data
127 * transferred or data transferred by this tidaw. Assumption:
128 * applies to total. */
129 data_count += tidaw[i].count;
130 if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
131 cbc_data = 4 + ALIGN(data_count, 4) - data_count;
132 cbc_count += cbc_data;
133 data_count += cbc_data;
134 }
135 }
136 return cbc_count;
137}
138
139/**
140 * tcw_finalize - finalize tcw length fields and tidaw list
141 * @tcw: pointer to the tcw
142 * @num_tidaws: the number of tidaws used to address input/output data or zero
143 * if no tida is used
144 *
 145 * Calculate the input-/output-count and tccbl fields in the tcw, add a
 146 * tcat to the tccb and terminate the data tidaw list if used.
147 *
148 * Note: in case input- or output-tida is used, the tidaw-list must be stored
149 * in contiguous storage (no ttic). The tcal field in the tccb must be
150 * up-to-date.
151 */
152void tcw_finalize(struct tcw *tcw, int num_tidaws)
153{
154 struct tidaw *tidaw;
155 struct tccb *tccb;
156 struct tccb_tcat *tcat;
157 u32 count;
158
159 /* Terminate tidaw list. */
160 tidaw = tcw_get_data(tcw);
161 if (num_tidaws > 0)
162 tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
163 /* Add tcat to tccb. */
164 tccb = tcw_get_tccb(tcw);
165 tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
 166 memset(tcat, 0, sizeof(*tcat));
167 /* Calculate tcw input/output count and tcat transport count. */
168 count = calc_dcw_count(tccb);
169 if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
170 count += calc_cbc_size(tidaw, num_tidaws);
171 if (tcw->r)
172 tcw->input_count = count;
173 else if (tcw->w)
174 tcw->output_count = count;
175 tcat->count = ALIGN(count, 4) + 4;
176 /* Calculate tccbl. */
177 tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
178 sizeof(struct tccb_tcat) - 20) >> 2;
179}
180EXPORT_SYMBOL(tcw_finalize);
181
182/**
183 * tcw_set_intrg - set the interrogate tcw address of a tcw
184 * @tcw: the tcw address
185 * @intrg_tcw: the address of the interrogate tcw
186 *
187 * Set the address of the interrogate tcw in the specified tcw.
188 */
189void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
190{
191 tcw->intrg = (u32) ((addr_t) intrg_tcw);
192}
193EXPORT_SYMBOL(tcw_set_intrg);
194
195/**
196 * tcw_set_data - set data address and tida flag of a tcw
197 * @tcw: the tcw address
198 * @data: the data address
 199 * @use_tidal: zero if the data address specifies a contiguous block of data,
 200 * non-zero if it specifies a list of tidaws.
201 *
202 * Set the input/output data address of a tcw (depending on the value of the
203 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
204 * is set as well.
205 */
206void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
207{
208 if (tcw->r) {
209 tcw->input = (u64) ((addr_t) data);
210 if (use_tidal)
211 tcw->flags |= TCW_FLAGS_INPUT_TIDA;
212 } else if (tcw->w) {
213 tcw->output = (u64) ((addr_t) data);
214 if (use_tidal)
215 tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
216 }
217}
218EXPORT_SYMBOL(tcw_set_data);
219
220/**
221 * tcw_set_tccb - set tccb address of a tcw
222 * @tcw: the tcw address
223 * @tccb: the tccb address
224 *
225 * Set the address of the tccb in the specified tcw.
226 */
227void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
228{
229 tcw->tccb = (u64) ((addr_t) tccb);
230}
231EXPORT_SYMBOL(tcw_set_tccb);
232
233/**
234 * tcw_set_tsb - set tsb address of a tcw
235 * @tcw: the tcw address
236 * @tsb: the tsb address
237 *
238 * Set the address of the tsb in the specified tcw.
239 */
240void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
241{
242 tcw->tsb = (u64) ((addr_t) tsb);
243}
244EXPORT_SYMBOL(tcw_set_tsb);
245
246/**
247 * tccb_init - initialize tccb
248 * @tccb: the tccb address
249 * @size: the maximum size of the tccb
 250 * @sac: the service-action-code to be used
251 *
252 * Initialize the header of the specified tccb by resetting all values to zero
253 * and filling in defaults for format, sac and initial tcal fields.
254 */
255void tccb_init(struct tccb *tccb, size_t size, u32 sac)
256{
257 memset(tccb, 0, size);
258 tccb->tcah.format = TCCB_FORMAT_DEFAULT;
259 tccb->tcah.sac = sac;
260 tccb->tcah.tcal = 12;
261}
262EXPORT_SYMBOL(tccb_init);
263
264/**
265 * tsb_init - initialize tsb
266 * @tsb: the tsb address
267 *
268 * Initialize the specified tsb by resetting all values to zero.
269 */
270void tsb_init(struct tsb *tsb)
271{
 272 memset(tsb, 0, sizeof(*tsb));
273}
274EXPORT_SYMBOL(tsb_init);
275
276/**
277 * tccb_add_dcw - add a dcw to the tccb
278 * @tccb: the tccb address
279 * @tccb_size: the maximum tccb size
280 * @cmd: the dcw command
281 * @flags: flags for the dcw
282 * @cd: pointer to control data for this dcw or NULL if none is required
283 * @cd_count: number of control data bytes for this dcw
284 * @count: number of data bytes for this dcw
285 *
286 * Add a new dcw to the specified tccb by writing the dcw information specified
287 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
288 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
289 * would exceed the available space as defined by @tccb_size.
290 *
 291 * Note: the tcal field of the tccb header will be updated to reflect added
292 * content.
293 */
294struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
295 void *cd, u8 cd_count, u32 count)
296{
297 struct dcw *dcw;
298 int size;
299 int tca_offset;
300
301 /* Check for space. */
302 tca_offset = tca_size(tccb);
303 size = ALIGN(sizeof(struct dcw) + cd_count, 4);
304 if (sizeof(struct tccb_tcah) + tca_offset + size +
305 sizeof(struct tccb_tcat) > tccb_size)
306 return ERR_PTR(-ENOSPC);
307 /* Add dcw to tca. */
308 dcw = (struct dcw *) &tccb->tca[tca_offset];
309 memset(dcw, 0, size);
310 dcw->cmd = cmd;
311 dcw->flags = flags;
312 dcw->count = count;
313 dcw->cd_count = cd_count;
314 if (cd)
315 memcpy(&dcw->cd[0], cd, cd_count);
316 tccb->tcah.tcal += size;
317 return dcw;
318}
319EXPORT_SYMBOL(tccb_add_dcw);
320
321/**
322 * tcw_add_tidaw - add a tidaw to a tcw
323 * @tcw: the tcw address
324 * @num_tidaws: the current number of tidaws
325 * @flags: flags for the new tidaw
326 * @addr: address value for the new tidaw
327 * @count: count value for the new tidaw
328 *
329 * Add a new tidaw to the input/output data tidaw-list of the specified tcw
330 * (depending on the value of the r-flag and w-flag) and return a pointer to
331 * the new tidaw.
332 *
333 * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
334 * must ensure that there is enough space for the new tidaw. The last-tidaw
335 * flag for the last tidaw in the list will be set by tcw_finalize.
336 */
337struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
338 void *addr, u32 count)
339{
340 struct tidaw *tidaw;
341
342 /* Add tidaw to tidaw-list. */
343 tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
344 memset(tidaw, 0, sizeof(struct tidaw));
345 tidaw->flags = flags;
346 tidaw->count = count;
347 tidaw->addr = (u64) ((addr_t) addr);
348 return tidaw;
349}
350EXPORT_SYMBOL(tcw_add_tidaw);
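
The helpers compose as follows. A sketch assembling a single-dcw write tcw;
alignments follow the values itcw_init() uses below (tcw 64, tccb/tsb 8,
tidaws 16), the dcw command 0x01 is made up, and error checking plus the
4k-crossing and below-2G placement constraints are glossed over (itcw.c below
automates all of this):

	#include <asm/fcx.h>

	static struct tcw tcw __attribute__ ((aligned(64)));
	static char tccb_buf[TCCB_MAX_SIZE] __attribute__ ((aligned(8)));
	static struct tsb tsb __attribute__ ((aligned(8)));
	static struct tidaw tidaws[2] __attribute__ ((aligned(16)));
	static char data[4096];

	static void build_write_tcw(void)
	{
		struct tccb *tccb = (struct tccb *) tccb_buf;

		tcw_init(&tcw, 0, 1);		/* r=0, w=1: write operation */
		tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
		tcw_set_tccb(&tcw, tccb);
		tsb_init(&tsb);
		tcw_set_tsb(&tcw, &tsb);
		/* One dcw moving sizeof(data) bytes, no control data. */
		tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x01, 0, NULL, 0,
			     sizeof(data));
		/* Address the data through a two-entry tidaw list. */
		tcw_set_data(&tcw, tidaws, 1);
		tcw_add_tidaw(&tcw, 0, 0, data, 2048);
		tcw_add_tidaw(&tcw, 1, 0, data + 2048, 2048);
		tcw_finalize(&tcw, 2);	/* counts, tcat, last-tidaw flag */
	}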
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 144466ab8c15..528065cb5021 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -8,7 +8,7 @@
8#ifndef S390_IDSET_H 8#ifndef S390_IDSET_H
9#define S390_IDSET_H S390_IDSET_H 9#define S390_IDSET_H S390_IDSET_H
10 10
11#include "schid.h" 11#include <asm/schid.h>
12 12
13struct idset; 13struct idset;
14 14
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 8c613160bfce..3f8f1cf69c76 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,12 +1,12 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include "schid.h" 4#include <asm/schid.h>
5 5
6/* 6/*
7 * operation request block 7 * command-mode operation request block
8 */ 8 */
9struct orb { 9struct cmd_orb {
10 u32 intparm; /* interruption parameter */ 10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */ 11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */ 12 u32 spnd : 1; /* suspend control */
@@ -28,8 +28,36 @@ struct orb {
28 u32 cpa; /* channel program address */ 28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4))); 29} __attribute__ ((packed, aligned(4)));
30 30
31/*
32 * transport-mode operation request block
33 */
34struct tm_orb {
35 u32 intparm;
36 u32 key:4;
37 u32 :9;
38 u32 b:1;
39 u32 :2;
40 u32 lpm:8;
41 u32 :7;
42 u32 x:1;
43 u32 tcw;
44 u32 prio:8;
45 u32 :8;
46 u32 rsvpgm:8;
47 u32 :8;
48 u32 :32;
49 u32 :32;
50 u32 :32;
51 u32 :32;
52} __attribute__ ((packed, aligned(4)));
53
54union orb {
55 struct cmd_orb cmd;
56 struct tm_orb tm;
57} __attribute__ ((packed, aligned(4)));
58
31struct io_subchannel_private { 59struct io_subchannel_private {
32 struct orb orb; /* operation request block */ 60 union orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */ 61 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8))); 62} __attribute__ ((aligned(8)));
35 63
@@ -95,16 +123,18 @@ struct ccw_device_private {
95 void *cmb_wait; /* deferred cmb enable/disable */ 123 void *cmb_wait; /* deferred cmb enable/disable */
96}; 124};
97 125
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr) 126static inline int ssch(struct subchannel_id schid, volatile union orb *addr)
99{ 127{
100 register struct subchannel_id reg1 asm("1") = schid; 128 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode; 129 int ccode = -EIO;
102 130
103 asm volatile( 131 asm volatile(
104 " ssch 0(%2)\n" 132 " ssch 0(%2)\n"
105 " ipm %0\n" 133 "0: ipm %0\n"
106 " srl %0,28" 134 " srl %0,28\n"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc"); 135 "1:\n"
136 EX_TABLE(0b, 1b)
137 : "+d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode; 138 return ccode;
109} 139}
110 140
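
The reworked ssch() preloads ccode with -EIO and adds an exception-table entry,
so a program check raised by the instruction (for instance against a subchannel
that has meanwhile become invalid) lands at the fixup label and is reported
like a condition code instead of oopsing. A sketch of a caller under the new
convention; sch and priv stand in for a subchannel and its
io_subchannel_private:

	int ccode = ssch(sch->schid, &priv->orb);

	switch (ccode) {
	case 0:		/* start function initiated */
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* subchannel not operational */
	case -EIO:	/* ssch faulted; the EX_TABLE fixup left -EIO */
		return -ENODEV;
	}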
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 652ea3625f9d..9fa2ac13ac85 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -2,7 +2,7 @@
2#define S390_CIO_IOASM_H 2#define S390_CIO_IOASM_H
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include "schid.h" 5#include <asm/schid.h>
6 6
7/* 7/*
8 * TPI info structure 8 * TPI info structure
diff --git a/drivers/s390/cio/isc.c b/drivers/s390/cio/isc.c
new file mode 100644
index 000000000000..c592087be0f1
--- /dev/null
+++ b/drivers/s390/cio/isc.c
@@ -0,0 +1,68 @@
1/*
2 * Functions for registration of I/O interruption subclasses on s390.
3 *
4 * Copyright IBM Corp. 2008
5 * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
6 */
7
8#include <linux/spinlock.h>
9#include <linux/module.h>
10#include <asm/isc.h>
11
12static unsigned int isc_refs[MAX_ISC + 1];
13static DEFINE_SPINLOCK(isc_ref_lock);
14
15
16/**
17 * isc_register - register an I/O interruption subclass.
18 * @isc: I/O interruption subclass to register
19 *
20 * The number of users for @isc is increased. If this is the first user to
21 * register @isc, the corresponding I/O interruption subclass mask is enabled.
22 *
23 * Context:
24 * This function must not be called in interrupt context.
25 */
26void isc_register(unsigned int isc)
27{
28 if (isc > MAX_ISC) {
29 WARN_ON(1);
30 return;
31 }
32
33 spin_lock(&isc_ref_lock);
34 if (isc_refs[isc] == 0)
35 ctl_set_bit(6, 31 - isc);
36 isc_refs[isc]++;
37 spin_unlock(&isc_ref_lock);
38}
39EXPORT_SYMBOL_GPL(isc_register);
40
41/**
42 * isc_unregister - unregister an I/O interruption subclass.
43 * @isc: I/O interruption subclass to unregister
44 *
45 * The number of users for @isc is decreased. If this is the last user to
46 * unregister @isc, the corresponding I/O interruption subclass mask is
47 * disabled.
48 * Note: This function must not be called if isc_register() hasn't been called
49 * before by the driver for @isc.
50 *
51 * Context:
52 * This function must not be called in interrupt context.
53 */
54void isc_unregister(unsigned int isc)
55{
56 spin_lock(&isc_ref_lock);
57 /* check for misuse */
58 if (isc > MAX_ISC || isc_refs[isc] == 0) {
59 WARN_ON(1);
60 goto out_unlock;
61 }
62 if (isc_refs[isc] == 1)
63 ctl_clear_bit(6, 31 - isc);
64 isc_refs[isc]--;
65out_unlock:
66 spin_unlock(&isc_ref_lock);
67}
68EXPORT_SYMBOL_GPL(isc_unregister);
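
Usage follows a claim/release pattern. A minimal sketch, assuming a module that
needs the made-up subclass EXAMPLE_ISC (real users pass a constant no larger
than MAX_ISC, such as QDIO_AIRQ_ISC in the qdio hunks below):

	#include <asm/isc.h>

	#define EXAMPLE_ISC 5	/* illustrative value only */

	static int __init example_init(void)
	{
		isc_register(EXAMPLE_ISC);   /* first user sets the mask bit */
		return 0;
	}

	static void __exit example_exit(void)
	{
		isc_unregister(EXAMPLE_ISC); /* last user clears it again */
	}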
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
new file mode 100644
index 000000000000..17da9ab932ed
--- /dev/null
+++ b/drivers/s390/cio/itcw.c
@@ -0,0 +1,327 @@
1/*
2 * Functions for incremental construction of fcx enabled I/O control blocks.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/string.h>
11#include <linux/errno.h>
12#include <linux/err.h>
13#include <linux/module.h>
14#include <asm/fcx.h>
15#include <asm/itcw.h>
16
17/**
18 * struct itcw - incremental tcw helper data type
19 *
20 * This structure serves as a handle for the incremental construction of a
21 * tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
22 * tcw and associated data. The data structures are contained inside a single
23 * contiguous buffer provided by the user.
24 *
25 * The itcw construction functions take care of overall data integrity:
26 * - reset unused fields to zero
27 * - fill in required pointers
28 * - ensure required alignment for data structures
 29 * - prevent data structures from crossing a 4k-byte boundary where required
30 * - calculate tccb-related length fields
31 * - optionally provide ready-made interrogate tcw and associated structures
32 *
33 * Restrictions apply to the itcws created with these construction functions:
34 * - tida only supported for data address, not for tccb
35 * - only contiguous tidaw-lists (no ttic)
36 * - total number of bytes required per itcw may not exceed 4k bytes
37 * - either read or write operation (may not work with r=0 and w=0)
38 *
39 * Example:
40 * struct itcw *itcw;
41 * void *buffer;
42 * size_t size;
43 *
44 * size = itcw_calc_size(1, 2, 0);
45 * buffer = kmalloc(size, GFP_DMA);
46 * if (!buffer)
47 * return -ENOMEM;
48 * itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
49 * if (IS_ERR(itcw))
 50 * return PTR_ERR(itcw);
51 * itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
52 * itcw_add_tidaw(itcw, 0, 0x30000, 20);
53 * itcw_add_tidaw(itcw, 0, 0x40000, 52);
54 * itcw_finalize(itcw);
55 *
56 */
57struct itcw {
58 struct tcw *tcw;
59 struct tcw *intrg_tcw;
60 int num_tidaws;
61 int max_tidaws;
62 int intrg_num_tidaws;
63 int intrg_max_tidaws;
64};
65
66/**
67 * itcw_get_tcw - return pointer to tcw associated with the itcw
68 * @itcw: address of the itcw
69 *
70 * Return pointer to the tcw associated with the itcw.
71 */
72struct tcw *itcw_get_tcw(struct itcw *itcw)
73{
74 return itcw->tcw;
75}
76EXPORT_SYMBOL(itcw_get_tcw);
77
78/**
79 * itcw_calc_size - return the size of an itcw with the given parameters
80 * @intrg: if non-zero, add an interrogate tcw
81 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
82 * if no tida is to be used.
83 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
84 * by the interrogate tcw, if specified
85 *
86 * Calculate and return the number of bytes required to hold an itcw with the
87 * given parameters and assuming tccbs with maximum size.
88 *
89 * Note that the resulting size also contains bytes needed for alignment
90 * padding as well as padding to ensure that data structures don't cross a
91 * 4k-boundary where required.
92 */
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{
95 size_t len;
96
97 /* Main data. */
98 len = sizeof(struct itcw);
99 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
100 /* TSB */ sizeof(struct tsb) +
101 /* TIDAL */ max_tidaws * sizeof(struct tidaw);
102 /* Interrogate data. */
103 if (intrg) {
104 len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
105 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 }
108 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0))
112 len += max(max_tidaws, intrg_max_tidaws) *
113 sizeof(struct tidaw) - 1;
114 return len;
115}
116EXPORT_SYMBOL(itcw_calc_size);
117
118#define CROSS4K(x, l) (((x) & ~4095) != ((x + l) & ~4095))
119
120static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
121 int align, int check_4k)
122{
123 addr_t addr;
124
125 addr = ALIGN(*start, align);
126 if (check_4k && CROSS4K(addr, len)) {
127 addr = ALIGN(addr, 4096);
128 addr = ALIGN(addr, align);
129 }
130 if (addr + len > end)
131 return ERR_PTR(-ENOSPC);
132 *start = addr + len;
133 return (void *) addr;
134}
135
136/**
137 * itcw_init - initialize incremental tcw data structure
138 * @buffer: address of buffer to use for data structures
139 * @size: number of bytes in buffer
140 * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
141 * operation tcw
142 * @intrg: if non-zero, add and initialize an interrogate tcw
143 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
144 * if no tida is to be used.
145 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
146 * by the interrogate tcw, if specified
147 *
148 * Prepare the specified buffer to be used as an incremental tcw, i.e. a
149 * helper data structure that can be used to construct a valid tcw by
150 * successive calls to other helper functions. Note: the buffer needs to be
151 * located below the 2G address limit. The resulting tcw has the following
152 * restrictions:
153 * - no tccb tidal
154 * - input/output tidal is contiguous (no ttic)
155 * - total data should not exceed 4k
156 * - tcw specifies either read or write operation
157 *
158 * On success, return pointer to the resulting incremental tcw data structure,
159 * ERR_PTR otherwise.
160 */
161struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
162 int max_tidaws, int intrg_max_tidaws)
163{
164 struct itcw *itcw;
165 void *chunk;
166 addr_t start;
167 addr_t end;
168
169 /* Check for 2G limit. */
170 start = (addr_t) buffer;
171 end = start + size;
172 if (end > (1 << 31))
173 return ERR_PTR(-EINVAL);
174 memset(buffer, 0, size);
175 /* ITCW. */
176 chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
177 if (IS_ERR(chunk))
178 return chunk;
179 itcw = chunk;
180 itcw->max_tidaws = max_tidaws;
181 itcw->intrg_max_tidaws = intrg_max_tidaws;
182 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk))
185 return chunk;
186 itcw->tcw = chunk;
187 tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
188 (op == ITCW_OP_WRITE) ? 1 : 0);
189 /* Interrogate TCW. */
190 if (intrg) {
191 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
192 if (IS_ERR(chunk))
193 return chunk;
194 itcw->intrg_tcw = chunk;
195 tcw_init(itcw->intrg_tcw, 1, 0);
196 tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
197 }
198 /* Data TIDAL. */
199 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1);
202 if (IS_ERR(chunk))
203 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1);
205 }
206 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1);
210 if (IS_ERR(chunk))
211 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1);
213 }
214 /* TSB. */
215 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
216 if (IS_ERR(chunk))
217 return chunk;
218 tsb_init(chunk);
219 tcw_set_tsb(itcw->tcw, chunk);
220 /* Interrogate TSB. */
221 if (intrg) {
222 chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
223 if (IS_ERR(chunk))
224 return chunk;
225 tsb_init(chunk);
226 tcw_set_tsb(itcw->intrg_tcw, chunk);
227 }
228 /* TCCB. */
229 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
230 if (IS_ERR(chunk))
231 return chunk;
232 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
233 tcw_set_tccb(itcw->tcw, chunk);
234 /* Interrogate TCCB. */
235 if (intrg) {
236 chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
237 if (IS_ERR(chunk))
238 return chunk;
239 tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
240 tcw_set_tccb(itcw->intrg_tcw, chunk);
241 tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
242 sizeof(struct dcw_intrg_data), 0);
243 tcw_finalize(itcw->intrg_tcw, 0);
244 }
245 return itcw;
246}
247EXPORT_SYMBOL(itcw_init);
248
249/**
250 * itcw_add_dcw - add a dcw to the itcw
251 * @itcw: address of the itcw
252 * @cmd: the dcw command
253 * @flags: flags for the dcw
254 * @cd: address of control data for this dcw or NULL if none is required
255 * @cd_count: number of control data bytes for this dcw
256 * @count: number of data bytes for this dcw
257 *
258 * Add a new dcw to the specified itcw by writing the dcw information specified
259 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
260 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
261 * would exceed the available space.
262 *
263 * Note: the tcal field of the tccb header will be updated to reflect added
264 * content.
265 */
266struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
267 u8 cd_count, u32 count)
268{
269 return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
270 flags, cd, cd_count, count);
271}
272EXPORT_SYMBOL(itcw_add_dcw);
273
274/**
275 * itcw_add_tidaw - add a tidaw to the itcw
276 * @itcw: address of the itcw
277 * @flags: flags for the new tidaw
278 * @addr: address value for the new tidaw
279 * @count: count value for the new tidaw
280 *
281 * Add a new tidaw to the input/output data tidaw-list of the specified itcw
282 * (depending on the value of the r-flag and w-flag). Return a pointer to
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space.
285 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize.
288 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{
291 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC);
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294}
295EXPORT_SYMBOL(itcw_add_tidaw);
296
297/**
298 * itcw_set_data - set data address and tida flag of the itcw
299 * @itcw: address of the itcw
300 * @addr: the data address
 301 * @use_tidal: zero if the data address specifies a contiguous block of data,
 302 * non-zero if it specifies a list of tidaws.
303 *
304 * Set the input/output data address of the itcw (depending on the value of the
305 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
306 * is set as well.
307 */
308void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
309{
310 tcw_set_data(itcw->tcw, addr, use_tidal);
311}
312EXPORT_SYMBOL(itcw_set_data);
313
314/**
315 * itcw_finalize - calculate length and count fields of the itcw
316 * @itcw: address of the itcw
317 *
 318 * Calculate tcw input-/output-count and tccbl fields and add a tcat to the
 319 * tccb. In case input- or output-tida is used, the tidaw-list must be stored
 320 * in contiguous storage (no ttic). The tcal field in the tccb must be
321 * up-to-date.
322 */
323void itcw_finalize(struct itcw *itcw)
324{
325 tcw_finalize(itcw->tcw, itcw->num_tidaws);
326}
327EXPORT_SYMBOL(itcw_finalize);
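
As a usage sketch, the incremental API packages the buffer carving that the
plain fcx helpers leave to the caller. This mirrors the kernel-doc example
above; the dcw command 0x01, buf1/buf2 and the byte counts are illustrative
only:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <asm/itcw.h>

	static struct tcw *prepare_write(void *buf1, void *buf2)
	{
		struct itcw *itcw;
		void *buffer;
		size_t size;

		size = itcw_calc_size(0, 2, 0);	/* no interrogate, 2 tidaws */
		buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
		if (!buffer)
			return ERR_PTR(-ENOMEM);
		itcw = itcw_init(buffer, size, ITCW_OP_WRITE, 0, 2, 0);
		if (IS_ERR(itcw)) {
			kfree(buffer);
			return (struct tcw *) itcw;	/* propagate ERR_PTR */
		}
		itcw_add_dcw(itcw, 0x01, 0, NULL, 0, 4096);
		itcw_add_tidaw(itcw, 0, buf1, 2048);
		itcw_add_tidaw(itcw, 0, buf2, 2048);
		itcw_finalize(itcw);
		return itcw_get_tcw(itcw);	/* ready for a tm orb */
	}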
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 445cf364e461..2bf36e14b102 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -2082,7 +2082,6 @@ qdio_timeout_handler(struct ccw_device *cdev)
2082 default: 2082 default:
2083 BUG(); 2083 BUG();
2084 } 2084 }
2085 ccw_device_set_timeout(cdev, 0);
2086 wake_up(&cdev->private->wait_q); 2085 wake_up(&cdev->private->wait_q);
2087} 2086}
2088 2087
@@ -2121,6 +2120,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2121 case -EIO: 2120 case -EIO:
2122 QDIO_PRINT_ERR("i/o error on device %s\n", 2121 QDIO_PRINT_ERR("i/o error on device %s\n",
2123 cdev->dev.bus_id); 2122 cdev->dev.bus_id);
2123 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2124 wake_up(&cdev->private->wait_q);
2124 return; 2125 return;
2125 case -ETIMEDOUT: 2126 case -ETIMEDOUT:
2126 qdio_timeout_handler(cdev); 2127 qdio_timeout_handler(cdev);
@@ -2139,8 +2140,8 @@ qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2139 QDIO_DBF_TEXT4(0, trace, dbf_text); 2140 QDIO_DBF_TEXT4(0, trace, dbf_text);
2140#endif /* CONFIG_QDIO_DEBUG */ 2141#endif /* CONFIG_QDIO_DEBUG */
2141 2142
2142 cstat = irb->scsw.cstat; 2143 cstat = irb->scsw.cmd.cstat;
2143 dstat = irb->scsw.dstat; 2144 dstat = irb->scsw.cmd.dstat;
2144 2145
2145 switch (irq_ptr->state) { 2146 switch (irq_ptr->state) {
2146 case QDIO_IRQ_STATE_INACTIVE: 2147 case QDIO_IRQ_STATE_INACTIVE:
@@ -2353,9 +2354,6 @@ tiqdio_check_chsc_availability(void)
2353{ 2354{
2354 char dbf_text[15]; 2355 char dbf_text[15];
2355 2356
2356 if (!css_characteristics_avail)
2357 return -EIO;
2358
2359 /* Check for bit 41. */ 2357 /* Check for bit 41. */
2360 if (!css_general_characteristics.aif) { 2358 if (!css_general_characteristics.aif) {
2361 QDIO_PRINT_WARN("Adapter interruption facility not " \ 2359 QDIO_PRINT_WARN("Adapter interruption facility not " \
@@ -2667,12 +2665,12 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2667 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 2665 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2668 } else if (rc == 0) { 2666 } else if (rc == 0) {
2669 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 2667 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2670 ccw_device_set_timeout(cdev, timeout);
2671 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); 2668 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2672 2669
2673 wait_event(cdev->private->wait_q, 2670 wait_event_interruptible_timeout(cdev->private->wait_q,
2674 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 2671 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2675 irq_ptr->state == QDIO_IRQ_STATE_ERR); 2672 irq_ptr->state == QDIO_IRQ_STATE_ERR,
2673 timeout);
2676 } else { 2674 } else {
2677 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " 2675 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2678 "device %s\n", result, cdev->dev.bus_id); 2676 "device %s\n", result, cdev->dev.bus_id);
@@ -2692,7 +2690,6 @@ qdio_shutdown(struct ccw_device *cdev, int how)
2692 2690
2693 /* Ignore errors. */ 2691 /* Ignore errors. */
2694 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 2692 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2695 ccw_device_set_timeout(cdev, 0);
2696out: 2693out:
2697 up(&irq_ptr->setting_up_sema); 2694 up(&irq_ptr->setting_up_sema);
2698 return result; 2695 return result;
@@ -2907,13 +2904,10 @@ qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2907 QDIO_DBF_TEXT0(0,setup,dbf_text); 2904 QDIO_DBF_TEXT0(0,setup,dbf_text);
2908 QDIO_DBF_TEXT0(0,trace,dbf_text); 2905 QDIO_DBF_TEXT0(0,trace,dbf_text);
2909 2906
2910 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { 2907 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat))
2911 ccw_device_set_timeout(cdev, 0);
2912 return; 2908 return;
2913 }
2914 2909
2915 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); 2910 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2916 ccw_device_set_timeout(cdev, 0);
2917} 2911}
2918 2912
2919int 2913int
@@ -3196,8 +3190,6 @@ qdio_establish(struct qdio_initialize *init_data)
3196 irq_ptr->schid.ssid, irq_ptr->schid.sch_no, 3190 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3197 result, result2); 3191 result, result2);
3198 result=result2; 3192 result=result2;
3199 if (result)
3200 ccw_device_set_timeout(cdev, 0);
3201 } 3193 }
3202 3194
3203 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); 3195 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
@@ -3279,7 +3271,6 @@ qdio_activate(struct ccw_device *cdev, int flags)
3279 3271
3280 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); 3272 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3281 3273
3282 ccw_device_set_timeout(cdev, 0);
3283 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 3274 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3284 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, 3275 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3285 0, DOIO_DENY_PREFETCH); 3276 0, DOIO_DENY_PREFETCH);
@@ -3722,7 +3713,8 @@ tiqdio_register_thinints(void)
3722 char dbf_text[20]; 3713 char dbf_text[20];
3723 3714
3724 tiqdio_ind = 3715 tiqdio_ind =
3725 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL); 3716 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
3717 TIQDIO_THININT_ISC);
3726 if (IS_ERR(tiqdio_ind)) { 3718 if (IS_ERR(tiqdio_ind)) {
3727 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind)); 3719 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3728 QDIO_DBF_TEXT0(0,setup,dbf_text); 3720 QDIO_DBF_TEXT0(0,setup,dbf_text);
@@ -3738,7 +3730,8 @@ static void
3738tiqdio_unregister_thinints(void) 3730tiqdio_unregister_thinints(void)
3739{ 3731{
3740 if (tiqdio_ind) 3732 if (tiqdio_ind)
3741 s390_unregister_adapter_interrupt(tiqdio_ind); 3733 s390_unregister_adapter_interrupt(tiqdio_ind,
3734 TIQDIO_THININT_ISC);
3742} 3735}
3743 3736
3744static int 3737static int
@@ -3899,6 +3892,7 @@ init_QDIO(void)
3899 qdio_mempool_alloc, 3892 qdio_mempool_alloc,
3900 qdio_mempool_free, NULL); 3893 qdio_mempool_free, NULL);
3901 3894
3895 isc_register(QDIO_AIRQ_ISC);
3902 if (tiqdio_check_chsc_availability()) 3896 if (tiqdio_check_chsc_availability())
3903 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); 3897 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3904 3898
@@ -3911,6 +3905,7 @@ static void __exit
3911cleanup_QDIO(void) 3905cleanup_QDIO(void)
3912{ 3906{
3913 tiqdio_unregister_thinints(); 3907 tiqdio_unregister_thinints();
3908 isc_unregister(QDIO_AIRQ_ISC);
3914 qdio_remove_procfs_entry(); 3909 qdio_remove_procfs_entry();
3915 qdio_release_qdio_memory(); 3910 qdio_release_qdio_memory();
3916 qdio_unregister_dbf_views(); 3911 qdio_unregister_dbf_views();
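
Taken together, the qdio changes pair subclass bookkeeping with adapter
interrupt registration: the ISC is claimed before a handler is registered on
it and released only after the handler is gone (TIQDIO_THININT_ISC becomes an
alias for QDIO_AIRQ_ISC in the qdio.h hunk below). The resulting ordering, in
sketch form with the identifiers from the hunks above:

	isc_register(QDIO_AIRQ_ISC);
	ind = s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL,
					      TIQDIO_THININT_ISC);
	/* ... handler live while the subclass mask stays enabled ... */
	s390_unregister_adapter_interrupt(ind, TIQDIO_THININT_ISC);
	isc_unregister(QDIO_AIRQ_ISC);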
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index c3df6b2c38b7..7656081a24d2 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -2,8 +2,8 @@
2#define _CIO_QDIO_H 2#define _CIO_QDIO_H
3 3
4#include <asm/page.h> 4#include <asm/page.h>
5 5#include <asm/isc.h>
6#include "schid.h" 6#include <asm/schid.h>
7 7
8#ifdef CONFIG_QDIO_DEBUG 8#ifdef CONFIG_QDIO_DEBUG
9#define QDIO_VERBOSE_LEVEL 9 9#define QDIO_VERBOSE_LEVEL 9
@@ -26,7 +26,7 @@
26 */ 26 */
27#define IQDIO_FILL_LEVEL_TO_POLL 4 27#define IQDIO_FILL_LEVEL_TO_POLL 4
28 28
29#define TIQDIO_THININT_ISC 3 29#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
30#define TIQDIO_DELAY_TARGET 0 30#define TIQDIO_DELAY_TARGET 0
31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ 31#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ 32#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
diff --git a/drivers/s390/cio/schid.h b/drivers/s390/cio/schid.h
deleted file mode 100644
index 54328fec5ade..000000000000
--- a/drivers/s390/cio/schid.h
+++ /dev/null
@@ -1,26 +0,0 @@
1#ifndef S390_SCHID_H
2#define S390_SCHID_H
3
4struct subchannel_id {
5 __u32 reserved:13;
6 __u32 ssid:2;
7 __u32 one:1;
8 __u32 sch_no:16;
9} __attribute__ ((packed,aligned(4)));
10
11
12/* Helper function for sane state of pre-allocated subchannel_id. */
13static inline void
14init_subchannel_id(struct subchannel_id *schid)
15{
16 memset(schid, 0, sizeof(struct subchannel_id));
17 schid->one = 1;
18}
19
20static inline int
21schid_equal(struct subchannel_id *schid1, struct subchannel_id *schid2)
22{
23 return !memcmp(schid1, schid2, sizeof(struct subchannel_id));
24}
25
26#endif /* S390_SCHID_H */
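
With schid.h deleted, the subchannel id type and its helpers are picked up via
<asm/schid.h>, matching the include rewrites in idset.h, io_sch.h, ioasm.h and
qdio.h above. Assuming the helpers moved over unchanged, usage stays as it was
(sketch; 'other' and the subchannel number are hypothetical):

	#include <asm/schid.h>

	struct subchannel_id schid, other;
	int same;

	init_subchannel_id(&schid);	/* zero the id and set the 'one' bit */
	schid.sch_no = 0x0001;		/* hypothetical subchannel number */
	same = schid_equal(&schid, &other);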
diff --git a/drivers/s390/cio/scsw.c b/drivers/s390/cio/scsw.c
new file mode 100644
index 000000000000..f8da25ab576d
--- /dev/null
+++ b/drivers/s390/cio/scsw.c
@@ -0,0 +1,843 @@
1/*
2 * Helper functions for scsw access.
3 *
4 * Copyright IBM Corp. 2008
5 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <asm/cio.h>
11#include "css.h"
12#include "chsc.h"
13
14/**
15 * scsw_is_tm - check for transport mode scsw
16 * @scsw: pointer to scsw
17 *
18 * Return non-zero if the specified scsw is a transport mode scsw, zero
19 * otherwise.
20 */
21int scsw_is_tm(union scsw *scsw)
22{
23 return css_general_characteristics.fcx && (scsw->tm.x == 1);
24}
25EXPORT_SYMBOL(scsw_is_tm);
26
27/**
28 * scsw_key - return scsw key field
29 * @scsw: pointer to scsw
30 *
31 * Return the value of the key field of the specified scsw, regardless of
32 * whether it is a transport mode or command mode scsw.
33 */
34u32 scsw_key(union scsw *scsw)
35{
36 if (scsw_is_tm(scsw))
37 return scsw->tm.key;
38 else
39 return scsw->cmd.key;
40}
41EXPORT_SYMBOL(scsw_key);
42
43/**
44 * scsw_eswf - return scsw eswf field
45 * @scsw: pointer to scsw
46 *
47 * Return the value of the eswf field of the specified scsw, regardless of
48 * whether it is a transport mode or command mode scsw.
49 */
50u32 scsw_eswf(union scsw *scsw)
51{
52 if (scsw_is_tm(scsw))
53 return scsw->tm.eswf;
54 else
55 return scsw->cmd.eswf;
56}
57EXPORT_SYMBOL(scsw_eswf);
58
59/**
60 * scsw_cc - return scsw cc field
61 * @scsw: pointer to scsw
62 *
63 * Return the value of the cc field of the specified scsw, regardless of
64 * whether it is a transport mode or command mode scsw.
65 */
66u32 scsw_cc(union scsw *scsw)
67{
68 if (scsw_is_tm(scsw))
69 return scsw->tm.cc;
70 else
71 return scsw->cmd.cc;
72}
73EXPORT_SYMBOL(scsw_cc);
74
75/**
76 * scsw_ectl - return scsw ectl field
77 * @scsw: pointer to scsw
78 *
79 * Return the value of the ectl field of the specified scsw, regardless of
80 * whether it is a transport mode or command mode scsw.
81 */
82u32 scsw_ectl(union scsw *scsw)
83{
84 if (scsw_is_tm(scsw))
85 return scsw->tm.ectl;
86 else
87 return scsw->cmd.ectl;
88}
89EXPORT_SYMBOL(scsw_ectl);
90
91/**
92 * scsw_pno - return scsw pno field
93 * @scsw: pointer to scsw
94 *
95 * Return the value of the pno field of the specified scsw, regardless of
96 * whether it is a transport mode or command mode scsw.
97 */
98u32 scsw_pno(union scsw *scsw)
99{
100 if (scsw_is_tm(scsw))
101 return scsw->tm.pno;
102 else
103 return scsw->cmd.pno;
104}
105EXPORT_SYMBOL(scsw_pno);
106
107/**
108 * scsw_fctl - return scsw fctl field
109 * @scsw: pointer to scsw
110 *
111 * Return the value of the fctl field of the specified scsw, regardless of
112 * whether it is a transport mode or command mode scsw.
113 */
114u32 scsw_fctl(union scsw *scsw)
115{
116 if (scsw_is_tm(scsw))
117 return scsw->tm.fctl;
118 else
119 return scsw->cmd.fctl;
120}
121EXPORT_SYMBOL(scsw_fctl);
122
123/**
124 * scsw_actl - return scsw actl field
125 * @scsw: pointer to scsw
126 *
127 * Return the value of the actl field of the specified scsw, regardless of
128 * whether it is a transport mode or command mode scsw.
129 */
130u32 scsw_actl(union scsw *scsw)
131{
132 if (scsw_is_tm(scsw))
133 return scsw->tm.actl;
134 else
135 return scsw->cmd.actl;
136}
137EXPORT_SYMBOL(scsw_actl);
138
139/**
140 * scsw_stctl - return scsw stctl field
141 * @scsw: pointer to scsw
142 *
143 * Return the value of the stctl field of the specified scsw, regardless of
144 * whether it is a transport mode or command mode scsw.
145 */
146u32 scsw_stctl(union scsw *scsw)
147{
148 if (scsw_is_tm(scsw))
149 return scsw->tm.stctl;
150 else
151 return scsw->cmd.stctl;
152}
153EXPORT_SYMBOL(scsw_stctl);
154
155/**
156 * scsw_dstat - return scsw dstat field
157 * @scsw: pointer to scsw
158 *
159 * Return the value of the dstat field of the specified scsw, regardless of
160 * whether it is a transport mode or command mode scsw.
161 */
162u32 scsw_dstat(union scsw *scsw)
163{
164 if (scsw_is_tm(scsw))
165 return scsw->tm.dstat;
166 else
167 return scsw->cmd.dstat;
168}
169EXPORT_SYMBOL(scsw_dstat);
170
171/**
172 * scsw_cstat - return scsw cstat field
173 * @scsw: pointer to scsw
174 *
175 * Return the value of the cstat field of the specified scsw, regardless of
176 * whether it is a transport mode or command mode scsw.
177 */
178u32 scsw_cstat(union scsw *scsw)
179{
180 if (scsw_is_tm(scsw))
181 return scsw->tm.cstat;
182 else
183 return scsw->cmd.cstat;
184}
185EXPORT_SYMBOL(scsw_cstat);
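
A usage sketch for the accessors above: status inspection that no longer cares
whether the interruption came from a command-mode or a transport-mode channel
program. my_irq_handler() and handle_unit_check() are hypothetical:

	static void my_irq_handler(struct ccw_device *cdev, struct irb *irb)
	{
		if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
			return;
		/* Same path for command- and transport-mode irbs: */
		if (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)
			handle_unit_check(cdev, irb);
	}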
186
187/**
188 * scsw_cmd_is_valid_key - check key field validity
189 * @scsw: pointer to scsw
190 *
191 * Return non-zero if the key field of the specified command mode scsw is
192 * valid, zero otherwise.
193 */
194int scsw_cmd_is_valid_key(union scsw *scsw)
195{
196 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
197}
198EXPORT_SYMBOL(scsw_cmd_is_valid_key);
199
200/**
 201 * scsw_cmd_is_valid_sctl - check sctl field validity
202 * @scsw: pointer to scsw
203 *
204 * Return non-zero if the fctl field of the specified command mode scsw is
205 * valid, zero otherwise.
206 */
207int scsw_cmd_is_valid_sctl(union scsw *scsw)
208{
209 return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
210}
211EXPORT_SYMBOL(scsw_cmd_is_valid_sctl);

/**
 * scsw_cmd_is_valid_eswf - check eswf field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the eswf field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_eswf(union scsw *scsw)
{
        return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_eswf);

/**
 * scsw_cmd_is_valid_cc - check cc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cc field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_cc(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
               (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cc);

/**
 * scsw_cmd_is_valid_fmt - check fmt field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fmt field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_fmt(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fmt);

/**
 * scsw_cmd_is_valid_pfch - check pfch field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pfch field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_pfch(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pfch);

/**
 * scsw_cmd_is_valid_isic - check isic field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the isic field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_isic(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_isic);

/**
 * scsw_cmd_is_valid_alcc - check alcc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the alcc field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_alcc(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_alcc);

/**
 * scsw_cmd_is_valid_ssi - check ssi field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ssi field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_ssi(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ssi);

/**
 * scsw_cmd_is_valid_zcc - check zcc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the zcc field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_zcc(union scsw *scsw)
{
        return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) &&
               (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_zcc);

/**
 * scsw_cmd_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_ectl(union scsw *scsw)
{
        return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
               !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
               (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_ectl);

/**
 * scsw_cmd_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_pno(union scsw *scsw)
{
        return (scsw->cmd.fctl != 0) &&
               (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
               (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) ||
                ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) &&
                 (scsw->cmd.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_cmd_is_valid_pno);
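
/*
 * Example (illustrative, not part of the patch): the pno check above
 * accepts a status-pending scsw unless it shows intermediate status
 * while the subchannel is not suspended. A minimal sketch of the two
 * cases, assuming the SCSW_* constants from <asm/cio.h>; the function
 * name is hypothetical.
 */
static void sample_pno_cases(void)
{
        union scsw scsw = { };

        scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
        scsw.cmd.stctl = SCSW_STCTL_STATUS_PEND | SCSW_STCTL_INTER_STATUS;
        /* Intermediate status without suspension: pno is not valid. */
        WARN_ON(scsw_cmd_is_valid_pno(&scsw));
        scsw.cmd.actl |= SCSW_ACTL_SUSPENDED;
        /* Suspended intermediate status: pno is valid. */
        WARN_ON(!scsw_cmd_is_valid_pno(&scsw));
}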

/**
 * scsw_cmd_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_fctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_fctl);

/**
 * scsw_cmd_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_actl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_actl);

/**
 * scsw_cmd_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_stctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_cmd_is_valid_stctl);

/**
 * scsw_cmd_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_dstat(union scsw *scsw)
{
        return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_dstat);

/**
 * scsw_cmd_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified command mode scsw is
 * valid, zero otherwise.
 */
int scsw_cmd_is_valid_cstat(union scsw *scsw)
{
        return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->cmd.cc != 3);
}
EXPORT_SYMBOL(scsw_cmd_is_valid_cstat);
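
/*
 * Example (illustrative, not part of the patch): dstat and cstat should
 * only be consumed once the matching validity check succeeded. A sketch
 * of a command mode error test, assuming the existing DEV_STAT_ constants
 * from <asm/cio.h>; the function name is hypothetical.
 */
static int sample_cmd_has_error(union scsw *scsw)
{
        if (scsw_cmd_is_valid_dstat(scsw) &&
            (scsw->cmd.dstat & (DEV_STAT_UNIT_CHECK | DEV_STAT_UNIT_EXCEP)))
                return 1;
        /* Any subchannel status is treated as an error indication. */
        return scsw_cmd_is_valid_cstat(scsw) && (scsw->cmd.cstat != 0);
}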

/**
 * scsw_tm_is_valid_key - check key field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the key field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_key(union scsw *scsw)
{
        return (scsw->tm.fctl & SCSW_FCTL_START_FUNC);
}
EXPORT_SYMBOL(scsw_tm_is_valid_key);

/**
 * scsw_tm_is_valid_eswf - check eswf field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the eswf field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_eswf(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_eswf);

/**
 * scsw_tm_is_valid_cc - check cc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cc field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_cc(union scsw *scsw)
{
        return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) &&
               (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cc);

/**
 * scsw_tm_is_valid_fmt - check fmt field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fmt field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fmt(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fmt);

/**
 * scsw_tm_is_valid_x - check x field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the x field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_x(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_x);

/**
 * scsw_tm_is_valid_q - check q field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the q field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_q(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_q);

/**
 * scsw_tm_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_ectl(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
               (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS);
}
EXPORT_SYMBOL(scsw_tm_is_valid_ectl);

/**
 * scsw_tm_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_pno(union scsw *scsw)
{
        return (scsw->tm.fctl != 0) &&
               (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) ||
                ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) &&
                 (scsw->tm.actl & SCSW_ACTL_SUSPENDED)));
}
EXPORT_SYMBOL(scsw_tm_is_valid_pno);

/**
 * scsw_tm_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fctl);

/**
 * scsw_tm_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_actl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_actl);

/**
 * scsw_tm_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_stctl(union scsw *scsw)
{
        /* Only valid if pmcw.dnv == 1 */
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_stctl);

/**
 * scsw_tm_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_dstat(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_dstat);

/**
 * scsw_tm_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_cstat(union scsw *scsw)
{
        return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) &&
               (scsw->tm.cc != 3);
}
EXPORT_SYMBOL(scsw_tm_is_valid_cstat);

/**
 * scsw_tm_is_valid_fcxs - check fcxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fcxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_fcxs(union scsw *scsw)
{
        return 1;
}
EXPORT_SYMBOL(scsw_tm_is_valid_fcxs);

/**
 * scsw_tm_is_valid_schxs - check schxs field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the schxs field of the specified transport mode scsw is
 * valid, zero otherwise.
 */
int scsw_tm_is_valid_schxs(union scsw *scsw)
{
        return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK |
                                  SCHN_STAT_INTF_CTRL_CHK |
                                  SCHN_STAT_PROT_CHECK |
                                  SCHN_STAT_CHN_DATA_CHK));
}
EXPORT_SYMBOL(scsw_tm_is_valid_schxs);
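
/*
 * Example (illustrative, not part of the patch): a transport mode sketch
 * mirroring the command mode pattern - guard each field access with its
 * validity check. The function name is hypothetical; DEV_STAT_DEV_END is
 * the existing constant from <asm/cio.h>.
 */
static int sample_tm_device_end(union scsw *scsw)
{
        return scsw_tm_is_valid_dstat(scsw) &&
               (scsw->tm.dstat & DEV_STAT_DEV_END);
}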

/**
 * scsw_is_valid_actl - check actl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the actl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_actl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_actl(scsw);
        else
                return scsw_cmd_is_valid_actl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_actl);

/**
 * scsw_is_valid_cc - check cc field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cc field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cc(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_cc(scsw);
        else
                return scsw_cmd_is_valid_cc(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cc);

/**
 * scsw_is_valid_cstat - check cstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the cstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_cstat(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_cstat(scsw);
        else
                return scsw_cmd_is_valid_cstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_cstat);

/**
 * scsw_is_valid_dstat - check dstat field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the dstat field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_dstat(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_dstat(scsw);
        else
                return scsw_cmd_is_valid_dstat(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_dstat);

/**
 * scsw_is_valid_ectl - check ectl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the ectl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_ectl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_ectl(scsw);
        else
                return scsw_cmd_is_valid_ectl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_ectl);

/**
 * scsw_is_valid_eswf - check eswf field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the eswf field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_eswf(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_eswf(scsw);
        else
                return scsw_cmd_is_valid_eswf(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_eswf);

/**
 * scsw_is_valid_fctl - check fctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the fctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_fctl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_fctl(scsw);
        else
                return scsw_cmd_is_valid_fctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_fctl);

/**
 * scsw_is_valid_key - check key field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the key field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_key(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_key(scsw);
        else
                return scsw_cmd_is_valid_key(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_key);

/**
 * scsw_is_valid_pno - check pno field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the pno field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_pno(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_pno(scsw);
        else
                return scsw_cmd_is_valid_pno(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_pno);

/**
 * scsw_is_valid_stctl - check stctl field validity
 * @scsw: pointer to scsw
 *
 * Return non-zero if the stctl field of the specified scsw is valid,
 * regardless of whether it is a transport mode or command mode scsw.
 * Return zero if the field does not contain a valid value.
 */
int scsw_is_valid_stctl(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_valid_stctl(scsw);
        else
                return scsw_cmd_is_valid_stctl(scsw);
}
EXPORT_SYMBOL(scsw_is_valid_stctl);
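
/*
 * Example (illustrative, not part of the patch): with the wrappers above,
 * callers need not distinguish scsw flavors. A sketch of a common "final
 * status" test written once for both modes; the function name is
 * hypothetical and the DEV_STAT_ constants come from <asm/cio.h>.
 */
static int sample_is_final_status(union scsw *scsw)
{
        return scsw_is_valid_stctl(scsw) &&
               (scsw_stctl(scsw) & SCSW_STCTL_STATUS_PEND) &&
               scsw_is_valid_dstat(scsw) &&
               (scsw_dstat(scsw) & (DEV_STAT_CHN_END | DEV_STAT_DEV_END));
}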

/**
 * scsw_cmd_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the command mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_cmd_is_solicited(union scsw *scsw)
{
        return (scsw->cmd.cc != 0) || (scsw->cmd.stctl !=
                (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_cmd_is_solicited);

/**
 * scsw_tm_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport mode scsw indicates that the associated
 * status condition is solicited, zero if it is unsolicited.
 */
int scsw_tm_is_solicited(union scsw *scsw)
{
        return (scsw->tm.cc != 0) || (scsw->tm.stctl !=
                (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS));
}
EXPORT_SYMBOL(scsw_tm_is_solicited);

/**
 * scsw_is_solicited - check for solicited scsw
 * @scsw: pointer to scsw
 *
 * Return non-zero if the transport or command mode scsw indicates that the
 * associated status condition is solicited, zero if it is unsolicited.
 */
int scsw_is_solicited(union scsw *scsw)
{
        if (scsw_is_tm(scsw))
                return scsw_tm_is_solicited(scsw);
        else
                return scsw_cmd_is_solicited(scsw);
}
EXPORT_SYMBOL(scsw_is_solicited);
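
/*
 * Example (illustrative, not part of the patch): a typical dispatch on
 * top of scsw_is_solicited() - solicited status completes an outstanding
 * request, while unsolicited status (e.g. a device-initiated unit check)
 * is routed to a separate path. Handler name and cdev usage are
 * hypothetical.
 */
static void sample_dispatch(struct ccw_device *cdev, struct irb *irb)
{
        if (scsw_is_solicited(&irb->scsw))
                pr_debug("solicited status for outstanding I/O\n");
        else
                pr_debug("unsolicited device status\n");
}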