Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/airq.c           171
-rw-r--r--  drivers/s390/cio/airq.h            10
-rw-r--r--  drivers/s390/cio/blacklist.c        2
-rw-r--r--  drivers/s390/cio/ccwgroup.c        25
-rw-r--r--  drivers/s390/cio/chsc.c           121
-rw-r--r--  drivers/s390/cio/cio.c            106
-rw-r--r--  drivers/s390/cio/cio.h             89
-rw-r--r--  drivers/s390/cio/cio_debug.h       22
-rw-r--r--  drivers/s390/cio/css.c            198
-rw-r--r--  drivers/s390/cio/css.h             78
-rw-r--r--  drivers/s390/cio/device.c         157
-rw-r--r--  drivers/s390/cio/device.h           5
-rw-r--r--  drivers/s390/cio/device_fsm.c     124
-rw-r--r--  drivers/s390/cio/device_id.c        9
-rw-r--r--  drivers/s390/cio/device_ops.c       2
-rw-r--r--  drivers/s390/cio/device_pgid.c      6
-rw-r--r--  drivers/s390/cio/device_status.c   13
-rw-r--r--  drivers/s390/cio/io_sch.h         163
-rw-r--r--  drivers/s390/cio/ioasm.h           66
-rw-r--r--  drivers/s390/cio/qdio.c            38
-rw-r--r--  drivers/s390/cio/qdio.h             2
21 files changed, 866 insertions, 541 deletions
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 5287631fbfc8..b7a07a866291 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,12 +1,12 @@
 /*
  * drivers/s390/cio/airq.c
- *    S/390 common I/O routines -- support for adapter interruptions
+ *    Support for adapter interruptions
  *
- *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- *			       IBM Corporation
- *    Author(s): Ingo Adlung (adlung@de.ibm.com)
- *		 Cornelia Huck (cornelia.huck@de.ibm.com)
- *		 Arnd Bergmann (arndb@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2007
+ *    Author(s): Ingo Adlung <adlung@de.ibm.com>
+ *		 Cornelia Huck <cornelia.huck@de.ibm.com>
+ *		 Arnd Bergmann <arndb@de.ibm.com>
+ *		 Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
 #include <linux/init.h>
@@ -14,72 +14,131 @@
 #include <linux/slab.h>
 #include <linux/rcupdate.h>
 
+#include <asm/airq.h>
+
+#include "cio.h"
 #include "cio_debug.h"
-#include "airq.h"
 
-static adapter_int_handler_t adapter_handler;
+#define NR_AIRQS		32
+#define NR_AIRQS_PER_WORD	sizeof(unsigned long)
+#define NR_AIRQ_WORDS		(NR_AIRQS / NR_AIRQS_PER_WORD)
 
-/*
- * register for adapter interrupts
- *
- * With HiperSockets the zSeries architecture provides for
- *  means of adapter interrups, pseudo I/O interrupts that are
- *  not tied to an I/O subchannel, but to an adapter. However,
- *  it doesn't disclose the info how to enable/disable them, but
- *  to recognize them only. Perhaps we should consider them
- *  being shared interrupts, and thus build a linked list
- *  of adapter handlers ... to be evaluated ...
- */
-int
-s390_register_adapter_interrupt (adapter_int_handler_t handler)
-{
-	int ret;
-	char dbf_txt[15];
+union indicator_t {
+	unsigned long word[NR_AIRQ_WORDS];
+	unsigned char byte[NR_AIRQS];
+} __attribute__((packed));
 
-	CIO_TRACE_EVENT (4, "rgaint");
+struct airq_t {
+	adapter_int_handler_t handler;
+	void *drv_data;
+};
 
-	if (handler == NULL)
-		ret = -EINVAL;
-	else
-		ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
-	if (!ret)
-		synchronize_sched(); /* Allow interrupts to complete. */
+static union indicator_t indicators;
+static struct airq_t *airqs[NR_AIRQS];
 
-	sprintf (dbf_txt, "ret:%d", ret);
-	CIO_TRACE_EVENT (4, dbf_txt);
+static int register_airq(struct airq_t *airq)
+{
+	int i;
 
-	return ret;
+	for (i = 0; i < NR_AIRQS; i++)
+		if (!cmpxchg(&airqs[i], NULL, airq))
+			return i;
+	return -ENOMEM;
 }
 
-int
-s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
+/**
+ * s390_register_adapter_interrupt() - register adapter interrupt handler
+ * @handler: adapter handler to be registered
+ * @drv_data: driver data passed with each call to the handler
+ *
+ * Returns:
+ *  Pointer to the indicator to be used on success
+ *  ERR_PTR() if registration failed
+ */
+void *s390_register_adapter_interrupt(adapter_int_handler_t handler,
+				      void *drv_data)
 {
+	struct airq_t *airq;
+	char dbf_txt[16];
 	int ret;
-	char dbf_txt[15];
 
-	CIO_TRACE_EVENT (4, "urgaint");
-
-	if (handler == NULL)
-		ret = -EINVAL;
-	else {
-		adapter_handler = NULL;
-		synchronize_sched(); /* Allow interrupts to complete. */
-		ret = 0;
+	airq = kmalloc(sizeof(struct airq_t), GFP_KERNEL);
+	if (!airq) {
+		ret = -ENOMEM;
+		goto out;
 	}
-	sprintf (dbf_txt, "ret:%d", ret);
-	CIO_TRACE_EVENT (4, dbf_txt);
-
-	return ret;
+	airq->handler = handler;
+	airq->drv_data = drv_data;
+	ret = register_airq(airq);
+	if (ret < 0)
+		kfree(airq);
+out:
+	snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%d", ret);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	if (ret < 0)
+		return ERR_PTR(ret);
+	else
+		return &indicators.byte[ret];
 }
+EXPORT_SYMBOL(s390_register_adapter_interrupt);
 
-void
-do_adapter_IO (void)
+/**
+ * s390_unregister_adapter_interrupt - unregister adapter interrupt handler
+ * @ind: indicator for which the handler is to be unregistered
+ */
+void s390_unregister_adapter_interrupt(void *ind)
 {
-	CIO_TRACE_EVENT (6, "doaio");
+	struct airq_t *airq;
+	char dbf_txt[16];
+	int i;
 
-	if (adapter_handler)
-		(*adapter_handler) ();
+	i = (int) ((addr_t) ind) - ((addr_t) &indicators.byte[0]);
+	snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%d", i);
+	CIO_TRACE_EVENT(4, dbf_txt);
+	indicators.byte[i] = 0;
+	airq = xchg(&airqs[i], NULL);
+	/*
+	 * Allow interrupts to complete. This will ensure that the airq handle
+	 * is no longer referenced by any interrupt handler.
+	 */
+	synchronize_sched();
+	kfree(airq);
 }
+EXPORT_SYMBOL(s390_unregister_adapter_interrupt);
+
+#define INDICATOR_MASK		(0xffUL << ((NR_AIRQS_PER_WORD - 1) * 8))
 
-EXPORT_SYMBOL (s390_register_adapter_interrupt);
-EXPORT_SYMBOL (s390_unregister_adapter_interrupt);
+void do_adapter_IO(void)
+{
+	int w;
+	int i;
+	unsigned long word;
+	struct airq_t *airq;
+
+	/*
+	 * Access indicator array in word-sized chunks to minimize storage
+	 * fetch operations.
+	 */
+	for (w = 0; w < NR_AIRQ_WORDS; w++) {
+		word = indicators.word[w];
+		i = w * NR_AIRQS_PER_WORD;
+		/*
+		 * Check bytes within word for active indicators.
+		 */
+		while (word) {
+			if (word & INDICATOR_MASK) {
+				airq = airqs[i];
+				if (likely(airq))
+					airq->handler(&indicators.byte[i],
+						      airq->drv_data);
+				else
+					/*
+					 * Reset ill-behaved indicator.
+					 */
+					indicators.byte[i] = 0;
+			}
+			word <<= 8;
+			i++;
+		}
+	}
+}
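
The rewrite above replaces the single global adapter_handler with a small registry: each registration allocates an airq_t, claims a slot via cmpxchg(), and hands the caller a pointer to one byte of the shared indicator area, which do_adapter_IO() scans word by word. A minimal usage sketch follows; it is not part of the patch, the two-argument handler prototype is only inferred from the call site in do_adapter_IO() (the real typedef lives in <asm/airq.h>, not shown here), and all my_* names are hypothetical.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <asm/airq.h>

struct my_adapter {			/* hypothetical driver state */
	void *indicator;		/* byte the adapter sets on interrupt */
	struct tasklet_struct tasklet;
};

static void my_adapter_handler(void *ind, void *drv_data)
{
	struct my_adapter *adapter = drv_data;

	/* Runs from do_adapter_IO(); defer the real work to a tasklet. */
	tasklet_schedule(&adapter->tasklet);
}

static int my_adapter_register_irq(struct my_adapter *adapter)
{
	adapter->indicator =
		s390_register_adapter_interrupt(my_adapter_handler, adapter);
	if (IS_ERR(adapter->indicator))
		return PTR_ERR(adapter->indicator);
	return 0;
}

static void my_adapter_unregister_irq(struct my_adapter *adapter)
{
	s390_unregister_adapter_interrupt(adapter->indicator);
}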
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h
deleted file mode 100644
index 7d6be3fdcd66..000000000000
--- a/drivers/s390/cio/airq.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef S390_AINTERRUPT_H
-#define S390_AINTERRUPT_H
-
-typedef int (*adapter_int_handler_t)(void);
-
-extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
-extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);
-extern void do_adapter_IO (void);
-
-#endif
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index bd5f16f80bf8..e8597ec92247 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -348,7 +348,7 @@ cio_ignore_write(struct file *file, const char __user *user_buf,
 	return user_len;
 }
 
-static struct seq_operations cio_ignore_proc_seq_ops = {
+static const struct seq_operations cio_ignore_proc_seq_ops = {
 	.start = cio_ignore_proc_seq_start,
 	.stop  = cio_ignore_proc_seq_stop,
 	.next  = cio_ignore_proc_seq_next,
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 5baa517c3b66..3964056a9a47 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -35,8 +35,8 @@ ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
35 struct ccwgroup_device *gdev; 35 struct ccwgroup_device *gdev;
36 struct ccwgroup_driver *gdrv; 36 struct ccwgroup_driver *gdrv;
37 37
38 gdev = container_of(dev, struct ccwgroup_device, dev); 38 gdev = to_ccwgroupdev(dev);
39 gdrv = container_of(drv, struct ccwgroup_driver, driver); 39 gdrv = to_ccwgroupdrv(drv);
40 40
41 if (gdev->creator_id == gdrv->driver_id) 41 if (gdev->creator_id == gdrv->driver_id)
42 return 1; 42 return 1;
@@ -75,8 +75,10 @@ static void ccwgroup_ungroup_callback(struct device *dev)
75 struct ccwgroup_device *gdev = to_ccwgroupdev(dev); 75 struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
76 76
77 mutex_lock(&gdev->reg_mutex); 77 mutex_lock(&gdev->reg_mutex);
78 __ccwgroup_remove_symlinks(gdev); 78 if (device_is_registered(&gdev->dev)) {
79 device_unregister(dev); 79 __ccwgroup_remove_symlinks(gdev);
80 device_unregister(dev);
81 }
80 mutex_unlock(&gdev->reg_mutex); 82 mutex_unlock(&gdev->reg_mutex);
81} 83}
82 84
@@ -111,7 +113,7 @@ ccwgroup_release (struct device *dev)
111 gdev = to_ccwgroupdev(dev); 113 gdev = to_ccwgroupdev(dev);
112 114
113 for (i = 0; i < gdev->count; i++) { 115 for (i = 0; i < gdev->count; i++) {
114 gdev->cdev[i]->dev.driver_data = NULL; 116 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
115 put_device(&gdev->cdev[i]->dev); 117 put_device(&gdev->cdev[i]->dev);
116 } 118 }
117 kfree(gdev); 119 kfree(gdev);
@@ -196,11 +198,11 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
196 goto error; 198 goto error;
197 } 199 }
198 /* Don't allow a device to belong to more than one group. */ 200 /* Don't allow a device to belong to more than one group. */
199 if (gdev->cdev[i]->dev.driver_data) { 201 if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
200 rc = -EINVAL; 202 rc = -EINVAL;
201 goto error; 203 goto error;
202 } 204 }
203 gdev->cdev[i]->dev.driver_data = gdev; 205 dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
204 } 206 }
205 207
206 gdev->creator_id = creator_id; 208 gdev->creator_id = creator_id;
@@ -234,8 +236,8 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
234error: 236error:
235 for (i = 0; i < argc; i++) 237 for (i = 0; i < argc; i++)
236 if (gdev->cdev[i]) { 238 if (gdev->cdev[i]) {
237 if (gdev->cdev[i]->dev.driver_data == gdev) 239 if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
238 gdev->cdev[i]->dev.driver_data = NULL; 240 dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
239 put_device(&gdev->cdev[i]->dev); 241 put_device(&gdev->cdev[i]->dev);
240 } 242 }
241 mutex_unlock(&gdev->reg_mutex); 243 mutex_unlock(&gdev->reg_mutex);
@@ -408,6 +410,7 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
408 /* register our new driver with the core */ 410 /* register our new driver with the core */
409 cdriver->driver.bus = &ccwgroup_bus_type; 411 cdriver->driver.bus = &ccwgroup_bus_type;
410 cdriver->driver.name = cdriver->name; 412 cdriver->driver.name = cdriver->name;
413 cdriver->driver.owner = cdriver->owner;
411 414
412 return driver_register(&cdriver->driver); 415 return driver_register(&cdriver->driver);
413} 416}
@@ -463,8 +466,8 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
463{ 466{
464 struct ccwgroup_device *gdev; 467 struct ccwgroup_device *gdev;
465 468
466 if (cdev->dev.driver_data) { 469 gdev = dev_get_drvdata(&cdev->dev);
467 gdev = (struct ccwgroup_device *)cdev->dev.driver_data; 470 if (gdev) {
468 if (get_device(&gdev->dev)) { 471 if (get_device(&gdev->dev)) {
469 mutex_lock(&gdev->reg_mutex); 472 mutex_lock(&gdev->reg_mutex);
470 if (device_is_registered(&gdev->dev)) 473 if (device_is_registered(&gdev->dev))
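
The ccwgroup changes above also stop poking dev->driver_data directly and use the driver-core accessors instead. A minimal sketch of that pattern (illustrative only; the example_* helpers are hypothetical, dev_set_drvdata()/dev_get_drvdata() are the generic device API):

#include <linux/device.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>

/* Attach group data to a ccw device: replaces cdev->dev.driver_data = gdev. */
static void example_set_group(struct ccw_device *cdev,
			      struct ccwgroup_device *gdev)
{
	dev_set_drvdata(&cdev->dev, gdev);
}

/* Look the group back up: replaces reading cdev->dev.driver_data. */
static struct ccwgroup_device *example_get_group(struct ccw_device *cdev)
{
	return dev_get_drvdata(&cdev->dev);
}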
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 597c0c76a2ad..e7ba16a74ef7 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -89,7 +89,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
89 /* Copy data */ 89 /* Copy data */
90 ret = 0; 90 ret = 0;
91 memset(ssd, 0, sizeof(struct chsc_ssd_info)); 91 memset(ssd, 0, sizeof(struct chsc_ssd_info));
92 if ((ssd_area->st != 0) && (ssd_area->st != 2)) 92 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
93 (ssd_area->st != SUBCHANNEL_TYPE_MSG))
93 goto out_free; 94 goto out_free;
94 ssd->path_mask = ssd_area->path_mask; 95 ssd->path_mask = ssd_area->path_mask;
95 ssd->fla_valid_mask = ssd_area->fla_valid_mask; 96 ssd->fla_valid_mask = ssd_area->fla_valid_mask;
@@ -132,20 +133,16 @@ static void terminate_internal_io(struct subchannel *sch)
132 device_set_intretry(sch); 133 device_set_intretry(sch);
133 /* Call handler. */ 134 /* Call handler. */
134 if (sch->driver && sch->driver->termination) 135 if (sch->driver && sch->driver->termination)
135 sch->driver->termination(&sch->dev); 136 sch->driver->termination(sch);
136} 137}
137 138
138static int 139static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
139s390_subchannel_remove_chpid(struct device *dev, void *data)
140{ 140{
141 int j; 141 int j;
142 int mask; 142 int mask;
143 struct subchannel *sch; 143 struct chp_id *chpid = data;
144 struct chp_id *chpid;
145 struct schib schib; 144 struct schib schib;
146 145
147 sch = to_subchannel(dev);
148 chpid = data;
149 for (j = 0; j < 8; j++) { 146 for (j = 0; j < 8; j++) {
150 mask = 0x80 >> j; 147 mask = 0x80 >> j;
151 if ((sch->schib.pmcw.pim & mask) && 148 if ((sch->schib.pmcw.pim & mask) &&
@@ -158,7 +155,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
158 spin_lock_irq(sch->lock); 155 spin_lock_irq(sch->lock);
159 156
160 stsch(sch->schid, &schib); 157 stsch(sch->schid, &schib);
161 if (!schib.pmcw.dnv) 158 if (!css_sch_is_valid(&schib))
162 goto out_unreg; 159 goto out_unreg;
163 memcpy(&sch->schib, &schib, sizeof(struct schib)); 160 memcpy(&sch->schib, &schib, sizeof(struct schib));
164 /* Check for single path devices. */ 161 /* Check for single path devices. */
@@ -172,12 +169,12 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
172 terminate_internal_io(sch); 169 terminate_internal_io(sch);
173 /* Re-start path verification. */ 170 /* Re-start path verification. */
174 if (sch->driver && sch->driver->verify) 171 if (sch->driver && sch->driver->verify)
175 sch->driver->verify(&sch->dev); 172 sch->driver->verify(sch);
176 } 173 }
177 } else { 174 } else {
178 /* trigger path verification. */ 175 /* trigger path verification. */
179 if (sch->driver && sch->driver->verify) 176 if (sch->driver && sch->driver->verify)
180 sch->driver->verify(&sch->dev); 177 sch->driver->verify(sch);
181 else if (sch->lpm == mask) 178 else if (sch->lpm == mask)
182 goto out_unreg; 179 goto out_unreg;
183 } 180 }
@@ -201,12 +198,10 @@ void chsc_chp_offline(struct chp_id chpid)
201 198
202 if (chp_get_status(chpid) <= 0) 199 if (chp_get_status(chpid) <= 0)
203 return; 200 return;
204 bus_for_each_dev(&css_bus_type, NULL, &chpid, 201 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
205 s390_subchannel_remove_chpid);
206} 202}
207 203
208static int 204static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
209s390_process_res_acc_new_sch(struct subchannel_id schid)
210{ 205{
211 struct schib schib; 206 struct schib schib;
212 /* 207 /*
@@ -252,18 +247,10 @@ static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
252 return 0; 247 return 0;
253} 248}
254 249
255static int 250static int __s390_process_res_acc(struct subchannel *sch, void *data)
256__s390_process_res_acc(struct subchannel_id schid, void *data)
257{ 251{
258 int chp_mask, old_lpm; 252 int chp_mask, old_lpm;
259 struct res_acc_data *res_data; 253 struct res_acc_data *res_data = data;
260 struct subchannel *sch;
261
262 res_data = data;
263 sch = get_subchannel_by_schid(schid);
264 if (!sch)
265 /* Check if a subchannel is newly available. */
266 return s390_process_res_acc_new_sch(schid);
267 254
268 spin_lock_irq(sch->lock); 255 spin_lock_irq(sch->lock);
269 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data); 256 chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
@@ -279,10 +266,10 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
279 if (!old_lpm && sch->lpm) 266 if (!old_lpm && sch->lpm)
280 device_trigger_reprobe(sch); 267 device_trigger_reprobe(sch);
281 else if (sch->driver && sch->driver->verify) 268 else if (sch->driver && sch->driver->verify)
282 sch->driver->verify(&sch->dev); 269 sch->driver->verify(sch);
283out: 270out:
284 spin_unlock_irq(sch->lock); 271 spin_unlock_irq(sch->lock);
285 put_device(&sch->dev); 272
286 return 0; 273 return 0;
287} 274}
288 275
@@ -305,7 +292,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
305 * The more information we have (info), the less scanning 292 * The more information we have (info), the less scanning
306 * will we have to do. 293 * will we have to do.
307 */ 294 */
308 for_each_subchannel(__s390_process_res_acc, res_data); 295 for_each_subchannel_staged(__s390_process_res_acc,
296 s390_process_res_acc_new_sch, res_data);
309} 297}
310 298
311static int 299static int
@@ -499,8 +487,7 @@ void chsc_process_crw(void)
499 } while (sei_area->flags & 0x80); 487 } while (sei_area->flags & 0x80);
500} 488}
501 489
502static int 490static int __chp_add_new_sch(struct subchannel_id schid, void *data)
503__chp_add_new_sch(struct subchannel_id schid)
504{ 491{
505 struct schib schib; 492 struct schib schib;
506 493
@@ -514,45 +501,37 @@ __chp_add_new_sch(struct subchannel_id schid)
514} 501}
515 502
516 503
517static int 504static int __chp_add(struct subchannel *sch, void *data)
518__chp_add(struct subchannel_id schid, void *data)
519{ 505{
520 int i, mask; 506 int i, mask;
521 struct chp_id *chpid; 507 struct chp_id *chpid = data;
522 struct subchannel *sch; 508
523
524 chpid = data;
525 sch = get_subchannel_by_schid(schid);
526 if (!sch)
527 /* Check if the subchannel is now available. */
528 return __chp_add_new_sch(schid);
529 spin_lock_irq(sch->lock); 509 spin_lock_irq(sch->lock);
530 for (i=0; i<8; i++) { 510 for (i=0; i<8; i++) {
531 mask = 0x80 >> i; 511 mask = 0x80 >> i;
532 if ((sch->schib.pmcw.pim & mask) && 512 if ((sch->schib.pmcw.pim & mask) &&
533 (sch->schib.pmcw.chpid[i] == chpid->id)) { 513 (sch->schib.pmcw.chpid[i] == chpid->id))
534 if (stsch(sch->schid, &sch->schib) != 0) {
535 /* Endgame. */
536 spin_unlock_irq(sch->lock);
537 return -ENXIO;
538 }
539 break; 514 break;
540 }
541 } 515 }
542 if (i==8) { 516 if (i==8) {
543 spin_unlock_irq(sch->lock); 517 spin_unlock_irq(sch->lock);
544 return 0; 518 return 0;
545 } 519 }
520 if (stsch(sch->schid, &sch->schib)) {
521 spin_unlock_irq(sch->lock);
522 css_schedule_eval(sch->schid);
523 return 0;
524 }
546 sch->lpm = ((sch->schib.pmcw.pim & 525 sch->lpm = ((sch->schib.pmcw.pim &
547 sch->schib.pmcw.pam & 526 sch->schib.pmcw.pam &
548 sch->schib.pmcw.pom) 527 sch->schib.pmcw.pom)
549 | mask) & sch->opm; 528 | mask) & sch->opm;
550 529
551 if (sch->driver && sch->driver->verify) 530 if (sch->driver && sch->driver->verify)
552 sch->driver->verify(&sch->dev); 531 sch->driver->verify(sch);
553 532
554 spin_unlock_irq(sch->lock); 533 spin_unlock_irq(sch->lock);
555 put_device(&sch->dev); 534
556 return 0; 535 return 0;
557} 536}
558 537
@@ -564,7 +543,8 @@ void chsc_chp_online(struct chp_id chpid)
564 CIO_TRACE_EVENT(2, dbf_txt); 543 CIO_TRACE_EVENT(2, dbf_txt);
565 544
566 if (chp_get_status(chpid) != 0) 545 if (chp_get_status(chpid) != 0)
567 for_each_subchannel(__chp_add, &chpid); 546 for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
547 &chpid);
568} 548}
569 549
570static void __s390_subchannel_vary_chpid(struct subchannel *sch, 550static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@@ -589,7 +569,7 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
589 if (!old_lpm) 569 if (!old_lpm)
590 device_trigger_reprobe(sch); 570 device_trigger_reprobe(sch);
591 else if (sch->driver && sch->driver->verify) 571 else if (sch->driver && sch->driver->verify)
592 sch->driver->verify(&sch->dev); 572 sch->driver->verify(sch);
593 break; 573 break;
594 } 574 }
595 sch->opm &= ~mask; 575 sch->opm &= ~mask;
@@ -603,37 +583,29 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
603 terminate_internal_io(sch); 583 terminate_internal_io(sch);
604 /* Re-start path verification. */ 584 /* Re-start path verification. */
605 if (sch->driver && sch->driver->verify) 585 if (sch->driver && sch->driver->verify)
606 sch->driver->verify(&sch->dev); 586 sch->driver->verify(sch);
607 } 587 }
608 } else if (!sch->lpm) { 588 } else if (!sch->lpm) {
609 if (device_trigger_verify(sch) != 0) 589 if (device_trigger_verify(sch) != 0)
610 css_schedule_eval(sch->schid); 590 css_schedule_eval(sch->schid);
611 } else if (sch->driver && sch->driver->verify) 591 } else if (sch->driver && sch->driver->verify)
612 sch->driver->verify(&sch->dev); 592 sch->driver->verify(sch);
613 break; 593 break;
614 } 594 }
615 spin_unlock_irqrestore(sch->lock, flags); 595 spin_unlock_irqrestore(sch->lock, flags);
616} 596}
617 597
618static int s390_subchannel_vary_chpid_off(struct device *dev, void *data) 598static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
619{ 599{
620 struct subchannel *sch; 600 struct chp_id *chpid = data;
621 struct chp_id *chpid;
622
623 sch = to_subchannel(dev);
624 chpid = data;
625 601
626 __s390_subchannel_vary_chpid(sch, *chpid, 0); 602 __s390_subchannel_vary_chpid(sch, *chpid, 0);
627 return 0; 603 return 0;
628} 604}
629 605
630static int s390_subchannel_vary_chpid_on(struct device *dev, void *data) 606static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
631{ 607{
632 struct subchannel *sch; 608 struct chp_id *chpid = data;
633 struct chp_id *chpid;
634
635 sch = to_subchannel(dev);
636 chpid = data;
637 609
638 __s390_subchannel_vary_chpid(sch, *chpid, 1); 610 __s390_subchannel_vary_chpid(sch, *chpid, 1);
639 return 0; 611 return 0;
@@ -643,13 +615,7 @@ static int
643__s390_vary_chpid_on(struct subchannel_id schid, void *data) 615__s390_vary_chpid_on(struct subchannel_id schid, void *data)
644{ 616{
645 struct schib schib; 617 struct schib schib;
646 struct subchannel *sch;
647 618
648 sch = get_subchannel_by_schid(schid);
649 if (sch) {
650 put_device(&sch->dev);
651 return 0;
652 }
653 if (stsch_err(schid, &schib)) 619 if (stsch_err(schid, &schib))
654 /* We're through */ 620 /* We're through */
655 return -ENXIO; 621 return -ENXIO;
@@ -669,12 +635,13 @@ int chsc_chp_vary(struct chp_id chpid, int on)
669 * Redo PathVerification on the devices the chpid connects to 635 * Redo PathVerification on the devices the chpid connects to
670 */ 636 */
671 637
672 bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
673 s390_subchannel_vary_chpid_on :
674 s390_subchannel_vary_chpid_off);
675 if (on) 638 if (on)
676 /* Scan for new devices on varied on path. */ 639 for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
677 for_each_subchannel(__s390_vary_chpid_on, NULL); 640 __s390_vary_chpid_on, &chpid);
641 else
642 for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
643 NULL, &chpid);
644
678 return 0; 645 return 0;
679} 646}
680 647
@@ -1075,7 +1042,7 @@ chsc_determine_css_characteristics(void)
1075 1042
1076 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1043 scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
1077 if (!scsc_area) { 1044 if (!scsc_area) {
1078 CIO_MSG_EVENT(0, "Was not able to determine available" 1045 CIO_MSG_EVENT(0, "Was not able to determine available "
1079 "CHSCs due to no memory.\n"); 1046 "CHSCs due to no memory.\n");
1080 return -ENOMEM; 1047 return -ENOMEM;
1081 } 1048 }
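
The chsc.c walkers above now receive the subchannel directly and split the "no struct subchannel registered yet" case into a second callback; the driver, for_each_subchannel_staged(), is added in css.c further down. A sketch of the converted shape (illustrative only, my_* names are hypothetical):

/* Callback for subchannels that already have a struct subchannel. */
static int my_known_sch(struct subchannel *sch, void *data)
{
	/* e.g. kick off path verification, as the converted callbacks do */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(sch);
	return 0;
}

/* Callback for subchannel IDs without a registered device yet. */
static int my_unknown_sch(struct subchannel_id schid, void *data)
{
	css_schedule_eval(schid);
	return 0;
}

static void my_walk(struct chp_id *chpid)
{
	for_each_subchannel_staged(my_known_sch, my_unknown_sch, chpid);
}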
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 46905345159e..60590a12d529 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -23,11 +23,12 @@
23#include <asm/reset.h> 23#include <asm/reset.h>
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/chpid.h> 25#include <asm/chpid.h>
26#include "airq.h" 26#include <asm/airq.h>
27#include "cio.h" 27#include "cio.h"
28#include "css.h" 28#include "css.h"
29#include "chsc.h" 29#include "chsc.h"
30#include "ioasm.h" 30#include "ioasm.h"
31#include "io_sch.h"
31#include "blacklist.h" 32#include "blacklist.h"
32#include "cio_debug.h" 33#include "cio_debug.h"
33#include "chp.h" 34#include "chp.h"
@@ -56,39 +57,37 @@ __setup ("cio_msg=", cio_setup);
56 57
57/* 58/*
58 * Function: cio_debug_init 59 * Function: cio_debug_init
59 * Initializes three debug logs (under /proc/s390dbf) for common I/O: 60 * Initializes three debug logs for common I/O:
60 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on 61 * - cio_msg logs generic cio messages
61 * - cio_trace logs the calling of different functions 62 * - cio_trace logs the calling of different functions
62 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on 63 * - cio_crw logs machine check related cio messages
63 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
64 */ 64 */
65static int __init 65static int __init cio_debug_init(void)
66cio_debug_init (void)
67{ 66{
68 cio_debug_msg_id = debug_register ("cio_msg", 16, 4, 16*sizeof (long)); 67 cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
69 if (!cio_debug_msg_id) 68 if (!cio_debug_msg_id)
70 goto out_unregister; 69 goto out_unregister;
71 debug_register_view (cio_debug_msg_id, &debug_sprintf_view); 70 debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
72 debug_set_level (cio_debug_msg_id, 2); 71 debug_set_level(cio_debug_msg_id, 2);
73 cio_debug_trace_id = debug_register ("cio_trace", 16, 4, 16); 72 cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
74 if (!cio_debug_trace_id) 73 if (!cio_debug_trace_id)
75 goto out_unregister; 74 goto out_unregister;
76 debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view); 75 debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
77 debug_set_level (cio_debug_trace_id, 2); 76 debug_set_level(cio_debug_trace_id, 2);
78 cio_debug_crw_id = debug_register ("cio_crw", 4, 4, 16*sizeof (long)); 77 cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
79 if (!cio_debug_crw_id) 78 if (!cio_debug_crw_id)
80 goto out_unregister; 79 goto out_unregister;
81 debug_register_view (cio_debug_crw_id, &debug_sprintf_view); 80 debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
82 debug_set_level (cio_debug_crw_id, 2); 81 debug_set_level(cio_debug_crw_id, 4);
83 return 0; 82 return 0;
84 83
85out_unregister: 84out_unregister:
86 if (cio_debug_msg_id) 85 if (cio_debug_msg_id)
87 debug_unregister (cio_debug_msg_id); 86 debug_unregister(cio_debug_msg_id);
88 if (cio_debug_trace_id) 87 if (cio_debug_trace_id)
89 debug_unregister (cio_debug_trace_id); 88 debug_unregister(cio_debug_trace_id);
90 if (cio_debug_crw_id) 89 if (cio_debug_crw_id)
91 debug_unregister (cio_debug_crw_id); 90 debug_unregister(cio_debug_crw_id);
92 printk(KERN_WARNING"cio: could not initialize debugging\n"); 91 printk(KERN_WARNING"cio: could not initialize debugging\n");
93 return -1; 92 return -1;
94} 93}
@@ -147,7 +146,7 @@ cio_tpi(void)
147 spin_lock(sch->lock); 146 spin_lock(sch->lock);
148 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 147 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
149 if (sch->driver && sch->driver->irq) 148 if (sch->driver && sch->driver->irq)
150 sch->driver->irq(&sch->dev); 149 sch->driver->irq(sch);
151 spin_unlock(sch->lock); 150 spin_unlock(sch->lock);
152 irq_exit (); 151 irq_exit ();
153 _local_bh_enable(); 152 _local_bh_enable();
@@ -184,33 +183,35 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
184{ 183{
185 char dbf_txt[15]; 184 char dbf_txt[15];
186 int ccode; 185 int ccode;
186 struct orb *orb;
187 187
188 CIO_TRACE_EVENT (4, "stIO"); 188 CIO_TRACE_EVENT(4, "stIO");
189 CIO_TRACE_EVENT (4, sch->dev.bus_id); 189 CIO_TRACE_EVENT(4, sch->dev.bus_id);
190 190
191 orb = &to_io_private(sch)->orb;
191 /* sch is always under 2G. */ 192 /* sch is always under 2G. */
192 sch->orb.intparm = (__u32)(unsigned long)sch; 193 orb->intparm = (u32)(addr_t)sch;
193 sch->orb.fmt = 1; 194 orb->fmt = 1;
194 195
195 sch->orb.pfch = sch->options.prefetch == 0; 196 orb->pfch = sch->options.prefetch == 0;
196 sch->orb.spnd = sch->options.suspend; 197 orb->spnd = sch->options.suspend;
197 sch->orb.ssic = sch->options.suspend && sch->options.inter; 198 orb->ssic = sch->options.suspend && sch->options.inter;
198 sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; 199 orb->lpm = (lpm != 0) ? lpm : sch->lpm;
199#ifdef CONFIG_64BIT 200#ifdef CONFIG_64BIT
200 /* 201 /*
201 * for 64 bit we always support 64 bit IDAWs with 4k page size only 202 * for 64 bit we always support 64 bit IDAWs with 4k page size only
202 */ 203 */
203 sch->orb.c64 = 1; 204 orb->c64 = 1;
204 sch->orb.i2k = 0; 205 orb->i2k = 0;
205#endif 206#endif
206 sch->orb.key = key >> 4; 207 orb->key = key >> 4;
207 /* issue "Start Subchannel" */ 208 /* issue "Start Subchannel" */
208 sch->orb.cpa = (__u32) __pa (cpa); 209 orb->cpa = (__u32) __pa(cpa);
209 ccode = ssch (sch->schid, &sch->orb); 210 ccode = ssch(sch->schid, orb);
210 211
211 /* process condition code */ 212 /* process condition code */
212 sprintf (dbf_txt, "ccode:%d", ccode); 213 sprintf(dbf_txt, "ccode:%d", ccode);
213 CIO_TRACE_EVENT (4, dbf_txt); 214 CIO_TRACE_EVENT(4, dbf_txt);
214 215
215 switch (ccode) { 216 switch (ccode) {
216 case 0: 217 case 0:
@@ -405,8 +406,8 @@ cio_modify (struct subchannel *sch)
405/* 406/*
406 * Enable subchannel. 407 * Enable subchannel.
407 */ 408 */
408int 409int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
409cio_enable_subchannel (struct subchannel *sch, unsigned int isc) 410 u32 intparm)
410{ 411{
411 char dbf_txt[15]; 412 char dbf_txt[15];
412 int ccode; 413 int ccode;
@@ -425,7 +426,7 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
425 for (retry = 5, ret = 0; retry > 0; retry--) { 426 for (retry = 5, ret = 0; retry > 0; retry--) {
426 sch->schib.pmcw.ena = 1; 427 sch->schib.pmcw.ena = 1;
427 sch->schib.pmcw.isc = isc; 428 sch->schib.pmcw.isc = isc;
428 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 429 sch->schib.pmcw.intparm = intparm;
429 ret = cio_modify(sch); 430 ret = cio_modify(sch);
430 if (ret == -ENODEV) 431 if (ret == -ENODEV)
431 break; 432 break;
@@ -567,7 +568,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
567 */ 568 */
568 if (sch->st != 0) { 569 if (sch->st != 0) {
569 CIO_DEBUG(KERN_INFO, 0, 570 CIO_DEBUG(KERN_INFO, 0,
570 "cio: Subchannel 0.%x.%04x reports " 571 "Subchannel 0.%x.%04x reports "
571 "non-I/O subchannel type %04X\n", 572 "non-I/O subchannel type %04X\n",
572 sch->schid.ssid, sch->schid.sch_no, sch->st); 573 sch->schid.ssid, sch->schid.sch_no, sch->st);
573 /* We stop here for non-io subchannels. */ 574 /* We stop here for non-io subchannels. */
@@ -576,11 +577,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
576 } 577 }
577 578
578 /* Initialization for io subchannels. */ 579 /* Initialization for io subchannels. */
579 if (!sch->schib.pmcw.dnv) { 580 if (!css_sch_is_valid(&sch->schib)) {
580 /* io subchannel but device number is invalid. */
581 err = -ENODEV; 581 err = -ENODEV;
582 goto out; 582 goto out;
583 } 583 }
584
584 /* Devno is valid. */ 585 /* Devno is valid. */
585 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { 586 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
586 /* 587 /*
@@ -600,7 +601,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
600 sch->lpm = sch->schib.pmcw.pam & sch->opm; 601 sch->lpm = sch->schib.pmcw.pam & sch->opm;
601 602
602 CIO_DEBUG(KERN_INFO, 0, 603 CIO_DEBUG(KERN_INFO, 0,
603 "cio: Detected device %04x on subchannel 0.%x.%04X" 604 "Detected device %04x on subchannel 0.%x.%04X"
604 " - PIM = %02X, PAM = %02X, POM = %02X\n", 605 " - PIM = %02X, PAM = %02X, POM = %02X\n",
605 sch->schib.pmcw.dev, sch->schid.ssid, 606 sch->schib.pmcw.dev, sch->schid.ssid,
606 sch->schid.sch_no, sch->schib.pmcw.pim, 607 sch->schid.sch_no, sch->schib.pmcw.pim,
@@ -680,7 +681,7 @@ do_IRQ (struct pt_regs *regs)
680 sizeof (irb->scsw)); 681 sizeof (irb->scsw));
681 /* Call interrupt handler if there is one. */ 682 /* Call interrupt handler if there is one. */
682 if (sch->driver && sch->driver->irq) 683 if (sch->driver && sch->driver->irq)
683 sch->driver->irq(&sch->dev); 684 sch->driver->irq(sch);
684 } 685 }
685 if (sch) 686 if (sch)
686 spin_unlock(sch->lock); 687 spin_unlock(sch->lock);
@@ -698,8 +699,14 @@ do_IRQ (struct pt_regs *regs)
698 699
699#ifdef CONFIG_CCW_CONSOLE 700#ifdef CONFIG_CCW_CONSOLE
700static struct subchannel console_subchannel; 701static struct subchannel console_subchannel;
702static struct io_subchannel_private console_priv;
701static int console_subchannel_in_use; 703static int console_subchannel_in_use;
702 704
705void *cio_get_console_priv(void)
706{
707 return &console_priv;
708}
709
703/* 710/*
704 * busy wait for the next interrupt on the console 711 * busy wait for the next interrupt on the console
705 */ 712 */
@@ -738,9 +745,9 @@ cio_test_for_console(struct subchannel_id schid, void *data)
738{ 745{
739 if (stsch_err(schid, &console_subchannel.schib) != 0) 746 if (stsch_err(schid, &console_subchannel.schib) != 0)
740 return -ENXIO; 747 return -ENXIO;
741 if (console_subchannel.schib.pmcw.dnv && 748 if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
742 console_subchannel.schib.pmcw.dev == 749 console_subchannel.schib.pmcw.dnv &&
743 console_devno) { 750 (console_subchannel.schib.pmcw.dev == console_devno)) {
744 console_irq = schid.sch_no; 751 console_irq = schid.sch_no;
745 return 1; /* found */ 752 return 1; /* found */
746 } 753 }
@@ -758,6 +765,7 @@ cio_get_console_sch_no(void)
758 /* VM provided us with the irq number of the console. */ 765 /* VM provided us with the irq number of the console. */
759 schid.sch_no = console_irq; 766 schid.sch_no = console_irq;
760 if (stsch(schid, &console_subchannel.schib) != 0 || 767 if (stsch(schid, &console_subchannel.schib) != 0 ||
768 (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
761 !console_subchannel.schib.pmcw.dnv) 769 !console_subchannel.schib.pmcw.dnv)
762 return -1; 770 return -1;
763 console_devno = console_subchannel.schib.pmcw.dev; 771 console_devno = console_subchannel.schib.pmcw.dev;
@@ -804,7 +812,7 @@ cio_probe_console(void)
804 ctl_set_bit(6, 24); 812 ctl_set_bit(6, 24);
805 console_subchannel.schib.pmcw.isc = 7; 813 console_subchannel.schib.pmcw.isc = 7;
806 console_subchannel.schib.pmcw.intparm = 814 console_subchannel.schib.pmcw.intparm =
807 (__u32)(unsigned long)&console_subchannel; 815 (u32)(addr_t)&console_subchannel;
808 ret = cio_modify(&console_subchannel); 816 ret = cio_modify(&console_subchannel);
809 if (ret) { 817 if (ret) {
810 console_subchannel_in_use = 0; 818 console_subchannel_in_use = 0;
@@ -1022,7 +1030,7 @@ static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
1022 1030
1023 if (stsch_reset(schid, &schib)) 1031 if (stsch_reset(schid, &schib))
1024 return -ENXIO; 1032 return -ENXIO;
1025 if (schib.pmcw.dnv && 1033 if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
1026 (schib.pmcw.dev == match_id->devid.devno) && 1034 (schib.pmcw.dev == match_id->devid.devno) &&
1027 (schid.ssid == match_id->devid.ssid)) { 1035 (schid.ssid == match_id->devid.ssid)) {
1028 match_id->schid = schid; 1036 match_id->schid = schid;
@@ -1068,6 +1076,8 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1068 return -ENODEV; 1076 return -ENODEV;
1069 if (stsch(schid, &schib)) 1077 if (stsch(schid, &schib))
1070 return -ENODEV; 1078 return -ENODEV;
1079 if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1080 return -ENODEV;
1071 if (!schib.pmcw.dnv) 1081 if (!schib.pmcw.dnv)
1072 return -ENODEV; 1082 return -ENODEV;
1073 iplinfo->devno = schib.pmcw.dev; 1083 iplinfo->devno = schib.pmcw.dev;
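
cio_enable_subchannel() above no longer hard-codes the subchannel address as the interruption parameter; callers now pass intparm explicitly (the new prototype appears in cio.h below). A hedged sketch of a caller that keeps the old behaviour (example_enable() is hypothetical):

/* Re-enable a subchannel with its own address as intparm, mirroring what
 * cio_enable_subchannel() used to set up itself. */
static int example_enable(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				     (u32)(addr_t)sch);
}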
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 7446c39951a7..52afa4c784de 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -11,32 +11,32 @@
11 * path management control word 11 * path management control word
12 */ 12 */
13struct pmcw { 13struct pmcw {
14 __u32 intparm; /* interruption parameter */ 14 u32 intparm; /* interruption parameter */
15 __u32 qf : 1; /* qdio facility */ 15 u32 qf : 1; /* qdio facility */
16 __u32 res0 : 1; /* reserved zeros */ 16 u32 res0 : 1; /* reserved zeros */
17 __u32 isc : 3; /* interruption sublass */ 17 u32 isc : 3; /* interruption sublass */
18 __u32 res5 : 3; /* reserved zeros */ 18 u32 res5 : 3; /* reserved zeros */
19 __u32 ena : 1; /* enabled */ 19 u32 ena : 1; /* enabled */
20 __u32 lm : 2; /* limit mode */ 20 u32 lm : 2; /* limit mode */
21 __u32 mme : 2; /* measurement-mode enable */ 21 u32 mme : 2; /* measurement-mode enable */
22 __u32 mp : 1; /* multipath mode */ 22 u32 mp : 1; /* multipath mode */
23 __u32 tf : 1; /* timing facility */ 23 u32 tf : 1; /* timing facility */
24 __u32 dnv : 1; /* device number valid */ 24 u32 dnv : 1; /* device number valid */
25 __u32 dev : 16; /* device number */ 25 u32 dev : 16; /* device number */
26 __u8 lpm; /* logical path mask */ 26 u8 lpm; /* logical path mask */
27 __u8 pnom; /* path not operational mask */ 27 u8 pnom; /* path not operational mask */
28 __u8 lpum; /* last path used mask */ 28 u8 lpum; /* last path used mask */
29 __u8 pim; /* path installed mask */ 29 u8 pim; /* path installed mask */
30 __u16 mbi; /* measurement-block index */ 30 u16 mbi; /* measurement-block index */
31 __u8 pom; /* path operational mask */ 31 u8 pom; /* path operational mask */
32 __u8 pam; /* path available mask */ 32 u8 pam; /* path available mask */
33 __u8 chpid[8]; /* CHPID 0-7 (if available) */ 33 u8 chpid[8]; /* CHPID 0-7 (if available) */
34 __u32 unused1 : 8; /* reserved zeros */ 34 u32 unused1 : 8; /* reserved zeros */
35 __u32 st : 3; /* subchannel type */ 35 u32 st : 3; /* subchannel type */
36 __u32 unused2 : 18; /* reserved zeros */ 36 u32 unused2 : 18; /* reserved zeros */
37 __u32 mbfc : 1; /* measurement block format control */ 37 u32 mbfc : 1; /* measurement block format control */
38 __u32 xmwme : 1; /* extended measurement word mode enable */ 38 u32 xmwme : 1; /* extended measurement word mode enable */
39 __u32 csense : 1; /* concurrent sense; can be enabled ...*/ 39 u32 csense : 1; /* concurrent sense; can be enabled ...*/
40 /* ... per MSCH, however, if facility */ 40 /* ... per MSCH, however, if facility */
41 /* ... is not installed, this results */ 41 /* ... is not installed, this results */
42 /* ... in an operand exception. */ 42 /* ... in an operand exception. */
@@ -52,31 +52,6 @@ struct schib {
52 __u8 mda[4]; /* model dependent area */ 52 __u8 mda[4]; /* model dependent area */
53} __attribute__ ((packed,aligned(4))); 53} __attribute__ ((packed,aligned(4)));
54 54
55/*
56 * operation request block
57 */
58struct orb {
59 __u32 intparm; /* interruption parameter */
60 __u32 key : 4; /* flags, like key, suspend control, etc. */
61 __u32 spnd : 1; /* suspend control */
62 __u32 res1 : 1; /* reserved */
63 __u32 mod : 1; /* modification control */
64 __u32 sync : 1; /* synchronize control */
65 __u32 fmt : 1; /* format control */
66 __u32 pfch : 1; /* prefetch control */
67 __u32 isic : 1; /* initial-status-interruption control */
68 __u32 alcc : 1; /* address-limit-checking control */
69 __u32 ssic : 1; /* suppress-suspended-interr. control */
70 __u32 res2 : 1; /* reserved */
71 __u32 c64 : 1; /* IDAW/QDIO 64 bit control */
72 __u32 i2k : 1; /* IDAW 2/4kB block size control */
73 __u32 lpm : 8; /* logical path mask */
74 __u32 ils : 1; /* incorrect length */
75 __u32 zero : 6; /* reserved zeros */
76 __u32 orbx : 1; /* ORB extension control */
77 __u32 cpa; /* channel program address */
78} __attribute__ ((packed,aligned(4)));
79
80/* subchannel data structure used by I/O subroutines */ 55/* subchannel data structure used by I/O subroutines */
81struct subchannel { 56struct subchannel {
82 struct subchannel_id schid; 57 struct subchannel_id schid;
@@ -85,7 +60,7 @@ struct subchannel {
85 enum { 60 enum {
86 SUBCHANNEL_TYPE_IO = 0, 61 SUBCHANNEL_TYPE_IO = 0,
87 SUBCHANNEL_TYPE_CHSC = 1, 62 SUBCHANNEL_TYPE_CHSC = 1,
88 SUBCHANNEL_TYPE_MESSAGE = 2, 63 SUBCHANNEL_TYPE_MSG = 2,
89 SUBCHANNEL_TYPE_ADM = 3, 64 SUBCHANNEL_TYPE_ADM = 3,
90 } st; /* subchannel type */ 65 } st; /* subchannel type */
91 66
@@ -99,11 +74,10 @@ struct subchannel {
99 __u8 lpm; /* logical path mask */ 74 __u8 lpm; /* logical path mask */
100 __u8 opm; /* operational path mask */ 75 __u8 opm; /* operational path mask */
101 struct schib schib; /* subchannel information block */ 76 struct schib schib; /* subchannel information block */
102 struct orb orb; /* operation request block */
103 struct ccw1 sense_ccw; /* static ccw for sense command */
104 struct chsc_ssd_info ssd_info; /* subchannel description */ 77 struct chsc_ssd_info ssd_info; /* subchannel description */
105 struct device dev; /* entry in device tree */ 78 struct device dev; /* entry in device tree */
106 struct css_driver *driver; 79 struct css_driver *driver;
80 void *private; /* private per subchannel type data */
107} __attribute__ ((aligned(8))); 81} __attribute__ ((aligned(8)));
108 82
109#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ 83#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
@@ -111,7 +85,7 @@ struct subchannel {
111#define to_subchannel(n) container_of(n, struct subchannel, dev) 85#define to_subchannel(n) container_of(n, struct subchannel, dev)
112 86
113extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); 87extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
114extern int cio_enable_subchannel (struct subchannel *, unsigned int); 88extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32);
115extern int cio_disable_subchannel (struct subchannel *); 89extern int cio_disable_subchannel (struct subchannel *);
116extern int cio_cancel (struct subchannel *); 90extern int cio_cancel (struct subchannel *);
117extern int cio_clear (struct subchannel *); 91extern int cio_clear (struct subchannel *);
@@ -125,6 +99,7 @@ extern int cio_get_options (struct subchannel *);
125extern int cio_modify (struct subchannel *); 99extern int cio_modify (struct subchannel *);
126 100
127int cio_create_sch_lock(struct subchannel *); 101int cio_create_sch_lock(struct subchannel *);
102void do_adapter_IO(void);
128 103
129/* Use with care. */ 104/* Use with care. */
130#ifdef CONFIG_CCW_CONSOLE 105#ifdef CONFIG_CCW_CONSOLE
@@ -133,10 +108,12 @@ extern void cio_release_console(void);
133extern int cio_is_console(struct subchannel_id); 108extern int cio_is_console(struct subchannel_id);
134extern struct subchannel *cio_get_console_subchannel(void); 109extern struct subchannel *cio_get_console_subchannel(void);
135extern spinlock_t * cio_get_console_lock(void); 110extern spinlock_t * cio_get_console_lock(void);
111extern void *cio_get_console_priv(void);
136#else 112#else
137#define cio_is_console(schid) 0 113#define cio_is_console(schid) 0
138#define cio_get_console_subchannel() NULL 114#define cio_get_console_subchannel() NULL
139#define cio_get_console_lock() NULL; 115#define cio_get_console_lock() NULL
116#define cio_get_console_priv() NULL
140#endif 117#endif
141 118
142extern int cio_show_msg; 119extern int cio_show_msg;
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h
index c9bf8989930f..d7429ef6c666 100644
--- a/drivers/s390/cio/cio_debug.h
+++ b/drivers/s390/cio/cio_debug.h
@@ -8,20 +8,19 @@ extern debug_info_t *cio_debug_msg_id;
8extern debug_info_t *cio_debug_trace_id; 8extern debug_info_t *cio_debug_trace_id;
9extern debug_info_t *cio_debug_crw_id; 9extern debug_info_t *cio_debug_crw_id;
10 10
11#define CIO_TRACE_EVENT(imp, txt) do { \ 11#define CIO_TRACE_EVENT(imp, txt) do { \
12 debug_text_event(cio_debug_trace_id, imp, txt); \ 12 debug_text_event(cio_debug_trace_id, imp, txt); \
13 } while (0) 13 } while (0)
14 14
15#define CIO_MSG_EVENT(imp, args...) do { \ 15#define CIO_MSG_EVENT(imp, args...) do { \
16 debug_sprintf_event(cio_debug_msg_id, imp , ##args); \ 16 debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
17 } while (0) 17 } while (0)
18 18
19#define CIO_CRW_EVENT(imp, args...) do { \ 19#define CIO_CRW_EVENT(imp, args...) do { \
20 debug_sprintf_event(cio_debug_crw_id, imp , ##args); \ 20 debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
21 } while (0) 21 } while (0)
22 22
23static inline void 23static inline void CIO_HEX_EVENT(int level, void *data, int length)
24CIO_HEX_EVENT(int level, void *data, int length)
25{ 24{
26 if (unlikely(!cio_debug_trace_id)) 25 if (unlikely(!cio_debug_trace_id))
27 return; 26 return;
@@ -32,9 +31,10 @@ CIO_HEX_EVENT(int level, void *data, int length)
32 } 31 }
33} 32}
34 33
35#define CIO_DEBUG(printk_level,event_level,msg...) ({ \ 34#define CIO_DEBUG(printk_level, event_level, msg...) do { \
36 if (cio_show_msg) printk(printk_level msg); \ 35 if (cio_show_msg) \
37 CIO_MSG_EVENT (event_level, msg); \ 36 printk(printk_level "cio: " msg); \
38}) 37 CIO_MSG_EVENT(event_level, msg); \
38 } while (0)
39 39
40#endif 40#endif
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c3df2cd009a4..3b45bbe6cce0 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -51,6 +51,62 @@ for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
51 return ret; 51 return ret;
52} 52}
53 53
54struct cb_data {
55 void *data;
56 struct idset *set;
57 int (*fn_known_sch)(struct subchannel *, void *);
58 int (*fn_unknown_sch)(struct subchannel_id, void *);
59};
60
61static int call_fn_known_sch(struct device *dev, void *data)
62{
63 struct subchannel *sch = to_subchannel(dev);
64 struct cb_data *cb = data;
65 int rc = 0;
66
67 idset_sch_del(cb->set, sch->schid);
68 if (cb->fn_known_sch)
69 rc = cb->fn_known_sch(sch, cb->data);
70 return rc;
71}
72
73static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
74{
75 struct cb_data *cb = data;
76 int rc = 0;
77
78 if (idset_sch_contains(cb->set, schid))
79 rc = cb->fn_unknown_sch(schid, cb->data);
80 return rc;
81}
82
83int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
84 int (*fn_unknown)(struct subchannel_id,
85 void *), void *data)
86{
87 struct cb_data cb;
88 int rc;
89
90 cb.set = idset_sch_new();
91 if (!cb.set)
92 return -ENOMEM;
93 idset_fill(cb.set);
94 cb.data = data;
95 cb.fn_known_sch = fn_known;
96 cb.fn_unknown_sch = fn_unknown;
97 /* Process registered subchannels. */
98 rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
99 if (rc)
100 goto out;
101 /* Process unregistered subchannels. */
102 if (fn_unknown)
103 rc = for_each_subchannel(call_fn_unknown_sch, &cb);
104out:
105 idset_free(cb.set);
106
107 return rc;
108}
109
54static struct subchannel * 110static struct subchannel *
55css_alloc_subchannel(struct subchannel_id schid) 111css_alloc_subchannel(struct subchannel_id schid)
56{ 112{
@@ -77,7 +133,7 @@ css_alloc_subchannel(struct subchannel_id schid)
77 * This is fine even on 64bit since the subchannel is always located 133 * This is fine even on 64bit since the subchannel is always located
78 * under 2G. 134 * under 2G.
79 */ 135 */
80 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 136 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
81 ret = cio_modify(sch); 137 ret = cio_modify(sch);
82 if (ret) { 138 if (ret) {
83 kfree(sch->lock); 139 kfree(sch->lock);
@@ -237,11 +293,25 @@ get_subchannel_by_schid(struct subchannel_id schid)
237 return dev ? to_subchannel(dev) : NULL; 293 return dev ? to_subchannel(dev) : NULL;
238} 294}
239 295
296/**
297 * css_sch_is_valid() - check if a subchannel is valid
298 * @schib: subchannel information block for the subchannel
299 */
300int css_sch_is_valid(struct schib *schib)
301{
302 if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
303 return 0;
304 return 1;
305}
306EXPORT_SYMBOL_GPL(css_sch_is_valid);
307
240static int css_get_subchannel_status(struct subchannel *sch) 308static int css_get_subchannel_status(struct subchannel *sch)
241{ 309{
242 struct schib schib; 310 struct schib schib;
243 311
244 if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) 312 if (stsch(sch->schid, &schib))
313 return CIO_GONE;
314 if (!css_sch_is_valid(&schib))
245 return CIO_GONE; 315 return CIO_GONE;
246 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) 316 if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
247 return CIO_REVALIDATE; 317 return CIO_REVALIDATE;
@@ -293,7 +363,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
293 action = UNREGISTER; 363 action = UNREGISTER;
294 if (sch->driver && sch->driver->notify) { 364 if (sch->driver && sch->driver->notify) {
295 spin_unlock_irqrestore(sch->lock, flags); 365 spin_unlock_irqrestore(sch->lock, flags);
296 ret = sch->driver->notify(&sch->dev, event); 366 ret = sch->driver->notify(sch, event);
297 spin_lock_irqsave(sch->lock, flags); 367 spin_lock_irqsave(sch->lock, flags);
298 if (ret) 368 if (ret)
299 action = NONE; 369 action = NONE;
@@ -349,7 +419,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
349 /* Will be done on the slow path. */ 419 /* Will be done on the slow path. */
350 return -EAGAIN; 420 return -EAGAIN;
351 } 421 }
352 if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { 422 if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
353 /* Unusable - ignore. */ 423 /* Unusable - ignore. */
354 return 0; 424 return 0;
355 } 425 }
@@ -388,20 +458,56 @@ static int __init slow_subchannel_init(void)
388 return 0; 458 return 0;
389} 459}
390 460
391static void css_slow_path_func(struct work_struct *unused) 461static int slow_eval_known_fn(struct subchannel *sch, void *data)
392{ 462{
393 struct subchannel_id schid; 463 int eval;
464 int rc;
394 465
395 CIO_TRACE_EVENT(4, "slowpath");
396 spin_lock_irq(&slow_subchannel_lock); 466 spin_lock_irq(&slow_subchannel_lock);
397 init_subchannel_id(&schid); 467 eval = idset_sch_contains(slow_subchannel_set, sch->schid);
398 while (idset_sch_get_first(slow_subchannel_set, &schid)) { 468 idset_sch_del(slow_subchannel_set, sch->schid);
399 idset_sch_del(slow_subchannel_set, schid); 469 spin_unlock_irq(&slow_subchannel_lock);
400 spin_unlock_irq(&slow_subchannel_lock); 470 if (eval) {
401 css_evaluate_subchannel(schid, 1); 471 rc = css_evaluate_known_subchannel(sch, 1);
402 spin_lock_irq(&slow_subchannel_lock); 472 if (rc == -EAGAIN)
473 css_schedule_eval(sch->schid);
403 } 474 }
475 return 0;
476}
477
478static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
479{
480 int eval;
481 int rc = 0;
482
483 spin_lock_irq(&slow_subchannel_lock);
484 eval = idset_sch_contains(slow_subchannel_set, schid);
485 idset_sch_del(slow_subchannel_set, schid);
404 spin_unlock_irq(&slow_subchannel_lock); 486 spin_unlock_irq(&slow_subchannel_lock);
487 if (eval) {
488 rc = css_evaluate_new_subchannel(schid, 1);
489 switch (rc) {
490 case -EAGAIN:
491 css_schedule_eval(schid);
492 rc = 0;
493 break;
494 case -ENXIO:
495 case -ENOMEM:
496 case -EIO:
497 /* These should abort looping */
498 break;
499 default:
500 rc = 0;
501 }
502 }
503 return rc;
504}
505
506static void css_slow_path_func(struct work_struct *unused)
507{
508 CIO_TRACE_EVENT(4, "slowpath");
509 for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
510 NULL);
405} 511}
406 512
407static DECLARE_WORK(slow_path_work, css_slow_path_func); 513static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -430,7 +536,6 @@ void css_schedule_eval_all(void)
430/* Reprobe subchannel if unregistered. */ 536/* Reprobe subchannel if unregistered. */
431static int reprobe_subchannel(struct subchannel_id schid, void *data) 537static int reprobe_subchannel(struct subchannel_id schid, void *data)
432{ 538{
433 struct subchannel *sch;
434 int ret; 539 int ret;
435 540
436 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n", 541 CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
@@ -438,13 +543,6 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data)
438 if (need_reprobe) 543 if (need_reprobe)
439 return -EAGAIN; 544 return -EAGAIN;
440 545
441 sch = get_subchannel_by_schid(schid);
442 if (sch) {
443 /* Already known. */
444 put_device(&sch->dev);
445 return 0;
446 }
447
448 ret = css_probe_device(schid); 546 ret = css_probe_device(schid);
449 switch (ret) { 547 switch (ret) {
450 case 0: 548 case 0:
@@ -472,7 +570,7 @@ static void reprobe_all(struct work_struct *unused)
472 /* Make sure initial subchannel scan is done. */ 570 /* Make sure initial subchannel scan is done. */
473 wait_event(ccw_device_init_wq, 571 wait_event(ccw_device_init_wq,
474 atomic_read(&ccw_device_init_count) == 0); 572 atomic_read(&ccw_device_init_count) == 0);
475 ret = for_each_subchannel(reprobe_subchannel, NULL); 573 ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
476 574
477 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret, 575 CIO_MSG_EVENT(2, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
478 need_reprobe); 576 need_reprobe);
@@ -787,8 +885,8 @@ int sch_is_pseudo_sch(struct subchannel *sch)
787static int 885static int
788css_bus_match (struct device *dev, struct device_driver *drv) 886css_bus_match (struct device *dev, struct device_driver *drv)
789{ 887{
790 struct subchannel *sch = container_of (dev, struct subchannel, dev); 888 struct subchannel *sch = to_subchannel(dev);
791 struct css_driver *driver = container_of (drv, struct css_driver, drv); 889 struct css_driver *driver = to_cssdriver(drv);
792 890
793 if (sch->st == driver->subchannel_type) 891 if (sch->st == driver->subchannel_type)
794 return 1; 892 return 1;
@@ -796,32 +894,36 @@ css_bus_match (struct device *dev, struct device_driver *drv)
796 return 0; 894 return 0;
797} 895}
798 896
799static int 897static int css_probe(struct device *dev)
800css_probe (struct device *dev)
801{ 898{
802 struct subchannel *sch; 899 struct subchannel *sch;
900 int ret;
803 901
804 sch = to_subchannel(dev); 902 sch = to_subchannel(dev);
805 sch->driver = container_of (dev->driver, struct css_driver, drv); 903 sch->driver = to_cssdriver(dev->driver);
806 return (sch->driver->probe ? sch->driver->probe(sch) : 0); 904 ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
905 if (ret)
906 sch->driver = NULL;
907 return ret;
807} 908}
808 909
809static int 910static int css_remove(struct device *dev)
810css_remove (struct device *dev)
811{ 911{
812 struct subchannel *sch; 912 struct subchannel *sch;
913 int ret;
813 914
814 sch = to_subchannel(dev); 915 sch = to_subchannel(dev);
815 return (sch->driver->remove ? sch->driver->remove(sch) : 0); 916 ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
917 sch->driver = NULL;
918 return ret;
816} 919}
817 920
818static void 921static void css_shutdown(struct device *dev)
819css_shutdown (struct device *dev)
820{ 922{
821 struct subchannel *sch; 923 struct subchannel *sch;
822 924
823 sch = to_subchannel(dev); 925 sch = to_subchannel(dev);
824 if (sch->driver->shutdown) 926 if (sch->driver && sch->driver->shutdown)
825 sch->driver->shutdown(sch); 927 sch->driver->shutdown(sch);
826} 928}
827 929
@@ -833,6 +935,34 @@ struct bus_type css_bus_type = {
833 .shutdown = css_shutdown, 935 .shutdown = css_shutdown,
834}; 936};
835 937
938/**
939 * css_driver_register - register a css driver
940 * @cdrv: css driver to register
941 *
942 * This is mainly a wrapper around driver_register that sets name
943 * and bus_type in the embedded struct device_driver correctly.
944 */
945int css_driver_register(struct css_driver *cdrv)
946{
947 cdrv->drv.name = cdrv->name;
948 cdrv->drv.bus = &css_bus_type;
949 cdrv->drv.owner = cdrv->owner;
950 return driver_register(&cdrv->drv);
951}
952EXPORT_SYMBOL_GPL(css_driver_register);
953
954/**
955 * css_driver_unregister - unregister a css driver
956 * @cdrv: css driver to unregister
957 *
958 * This is a wrapper around driver_unregister.
959 */
960void css_driver_unregister(struct css_driver *cdrv)
961{
962 driver_unregister(&cdrv->drv);
963}
964EXPORT_SYMBOL_GPL(css_driver_unregister);
965
836subsys_initcall(init_channel_subsystem); 966subsys_initcall(init_channel_subsystem);
837 967
838MODULE_LICENSE("GPL"); 968MODULE_LICENSE("GPL");
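
With the owner and name fields plus subchannel-typed callbacks (see the css.h hunk that follows), a subchannel driver now registers through css_driver_register(). A sketch under those assumptions (my_* names are hypothetical, SUBCHANNEL_TYPE_IO and struct css_driver are as shown in this patch):

#include <linux/module.h>
#include "css.h"

static void my_sch_irq(struct subchannel *sch)
{
	/* interrupt handling for this subchannel */
}

static int my_sch_probe(struct subchannel *sch)
{
	sch->private = NULL;	/* per-subchannel-type data, new in this patch */
	return 0;
}

static struct css_driver my_css_driver = {
	.owner		 = THIS_MODULE,
	.name		 = "my_sch_driver",
	.subchannel_type = SUBCHANNEL_TYPE_IO,
	.irq		 = my_sch_irq,
	.probe		 = my_sch_probe,
};

static int __init my_init(void)
{
	/* css_driver_register() fills in drv.bus, drv.name and drv.owner. */
	return css_driver_register(&my_css_driver);
}

static void __exit my_exit(void)
{
	css_driver_unregister(&my_css_driver);
}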
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 81215ef32435..b70554523552 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -58,64 +58,6 @@ struct pgid {
58 __u32 tod_high; /* high word TOD clock */ 58 __u32 tod_high; /* high word TOD clock */
59} __attribute__ ((packed)); 59} __attribute__ ((packed));
60 60
61#define MAX_CIWS 8
62
63/*
64 * sense-id response buffer layout
65 */
66struct senseid {
67 /* common part */
68 __u8 reserved; /* always 0x'FF' */
69 __u16 cu_type; /* control unit type */
70 __u8 cu_model; /* control unit model */
71 __u16 dev_type; /* device type */
72 __u8 dev_model; /* device model */
73 __u8 unused; /* padding byte */
74 /* extended part */
75 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
76} __attribute__ ((packed,aligned(4)));
77
78struct ccw_device_private {
79 struct ccw_device *cdev;
80 struct subchannel *sch;
81 int state; /* device state */
82 atomic_t onoff;
83 unsigned long registered;
84 struct ccw_dev_id dev_id; /* device id */
85 struct subchannel_id schid; /* subchannel number */
86 __u8 imask; /* lpm mask for SNID/SID/SPGID */
87 int iretry; /* retry counter SNID/SID/SPGID */
88 struct {
89 unsigned int fast:1; /* post with "channel end" */
90 unsigned int repall:1; /* report every interrupt status */
91 unsigned int pgroup:1; /* do path grouping */
92 unsigned int force:1; /* allow forced online */
93 } __attribute__ ((packed)) options;
94 struct {
95 unsigned int pgid_single:1; /* use single path for Set PGID */
96 unsigned int esid:1; /* Ext. SenseID supported by HW */
97 unsigned int dosense:1; /* delayed SENSE required */
98 unsigned int doverify:1; /* delayed path verification */
99 unsigned int donotify:1; /* call notify function */
100 unsigned int recog_done:1; /* dev. recog. complete */
101 unsigned int fake_irb:1; /* deliver faked irb */
102 unsigned int intretry:1; /* retry internal operation */
103 } __attribute__((packed)) flags;
104 unsigned long intparm; /* user interruption parameter */
105 struct qdio_irq *qdio_data;
106 struct irb irb; /* device status */
107 struct senseid senseid; /* SenseID info */
108 struct pgid pgid[8]; /* path group IDs per chpid*/
109 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
110 struct work_struct kick_work;
111 wait_queue_head_t wait_q;
112 struct timer_list timer;
113 void *cmb; /* measurement information */
114 struct list_head cmb_list; /* list of measured devices */
115 u64 cmb_start_time; /* clock value of cmb reset */
116 void *cmb_wait; /* deferred cmb enable/disable */
117};
118
119/* 61/*
120 * A css driver handles all subchannels of one type. 62 * A css driver handles all subchannels of one type.
121 * Currently, we only care about I/O subchannels (type 0), these 63 * Currently, we only care about I/O subchannels (type 0), these
@@ -123,25 +65,35 @@ struct ccw_device_private {
123 */ 65 */
124struct subchannel; 66struct subchannel;
125struct css_driver { 67struct css_driver {
68 struct module *owner;
126 unsigned int subchannel_type; 69 unsigned int subchannel_type;
127 struct device_driver drv; 70 struct device_driver drv;
128 void (*irq)(struct device *); 71 void (*irq)(struct subchannel *);
129 int (*notify)(struct device *, int); 72 int (*notify)(struct subchannel *, int);
130 void (*verify)(struct device *); 73 void (*verify)(struct subchannel *);
131 void (*termination)(struct device *); 74 void (*termination)(struct subchannel *);
132 int (*probe)(struct subchannel *); 75 int (*probe)(struct subchannel *);
133 int (*remove)(struct subchannel *); 76 int (*remove)(struct subchannel *);
134 void (*shutdown)(struct subchannel *); 77 void (*shutdown)(struct subchannel *);
78 const char *name;
135}; 79};
136 80
81#define to_cssdriver(n) container_of(n, struct css_driver, drv)
82
137/* 83/*
138 * all css_drivers have the css_bus_type 84 * all css_drivers have the css_bus_type
139 */ 85 */
140extern struct bus_type css_bus_type; 86extern struct bus_type css_bus_type;
141 87
88extern int css_driver_register(struct css_driver *);
89extern void css_driver_unregister(struct css_driver *);
90
142extern void css_sch_device_unregister(struct subchannel *); 91extern void css_sch_device_unregister(struct subchannel *);
143extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 92extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
144extern int css_init_done; 93extern int css_init_done;
94int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
95 int (*fn_unknown)(struct subchannel_id,
96 void *), void *data);
145extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); 97extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
146extern void css_process_crw(int, int); 98extern void css_process_crw(int, int);
147extern void css_reiterate_subchannels(void); 99extern void css_reiterate_subchannels(void);
@@ -188,6 +140,8 @@ void css_schedule_eval(struct subchannel_id schid);
188void css_schedule_eval_all(void); 140void css_schedule_eval_all(void);
189 141
190int sch_is_pseudo_sch(struct subchannel *); 142int sch_is_pseudo_sch(struct subchannel *);
143struct schib;
144int css_sch_is_valid(struct schib *);
191 145
192extern struct workqueue_struct *slow_path_wq; 146extern struct workqueue_struct *slow_path_wq;
193 147
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 74f6b539974a..d35dc3f25d06 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -17,6 +17,7 @@
17#include <linux/list.h> 17#include <linux/list.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/timer.h>
20 21
21#include <asm/ccwdev.h> 22#include <asm/ccwdev.h>
22#include <asm/cio.h> 23#include <asm/cio.h>
@@ -28,6 +29,12 @@
28#include "css.h" 29#include "css.h"
29#include "device.h" 30#include "device.h"
30#include "ioasm.h" 31#include "ioasm.h"
32#include "io_sch.h"
33
34static struct timer_list recovery_timer;
35static spinlock_t recovery_lock;
36static int recovery_phase;
37static const unsigned long recovery_delay[] = { 3, 30, 300 };
31 38
32/******************* bus type handling ***********************/ 39/******************* bus type handling ***********************/
33 40
@@ -115,19 +122,18 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
115 122
116struct bus_type ccw_bus_type; 123struct bus_type ccw_bus_type;
117 124
118static int io_subchannel_probe (struct subchannel *); 125static void io_subchannel_irq(struct subchannel *);
119static int io_subchannel_remove (struct subchannel *); 126static int io_subchannel_probe(struct subchannel *);
120static int io_subchannel_notify(struct device *, int); 127static int io_subchannel_remove(struct subchannel *);
121static void io_subchannel_verify(struct device *); 128static int io_subchannel_notify(struct subchannel *, int);
122static void io_subchannel_ioterm(struct device *); 129static void io_subchannel_verify(struct subchannel *);
130static void io_subchannel_ioterm(struct subchannel *);
123static void io_subchannel_shutdown(struct subchannel *); 131static void io_subchannel_shutdown(struct subchannel *);
124 132
125static struct css_driver io_subchannel_driver = { 133static struct css_driver io_subchannel_driver = {
134 .owner = THIS_MODULE,
126 .subchannel_type = SUBCHANNEL_TYPE_IO, 135 .subchannel_type = SUBCHANNEL_TYPE_IO,
127 .drv = { 136 .name = "io_subchannel",
128 .name = "io_subchannel",
129 .bus = &css_bus_type,
130 },
131 .irq = io_subchannel_irq, 137 .irq = io_subchannel_irq,
132 .notify = io_subchannel_notify, 138 .notify = io_subchannel_notify,
133 .verify = io_subchannel_verify, 139 .verify = io_subchannel_verify,
@@ -142,6 +148,8 @@ struct workqueue_struct *ccw_device_notify_work;
142wait_queue_head_t ccw_device_init_wq; 148wait_queue_head_t ccw_device_init_wq;
143atomic_t ccw_device_init_count; 149atomic_t ccw_device_init_count;
144 150
151static void recovery_func(unsigned long data);
152
145static int __init 153static int __init
146init_ccw_bus_type (void) 154init_ccw_bus_type (void)
147{ 155{
@@ -149,6 +157,7 @@ init_ccw_bus_type (void)
149 157
150 init_waitqueue_head(&ccw_device_init_wq); 158 init_waitqueue_head(&ccw_device_init_wq);
151 atomic_set(&ccw_device_init_count, 0); 159 atomic_set(&ccw_device_init_count, 0);
160 setup_timer(&recovery_timer, recovery_func, 0);
152 161
153 ccw_device_work = create_singlethread_workqueue("cio"); 162 ccw_device_work = create_singlethread_workqueue("cio");
154 if (!ccw_device_work) 163 if (!ccw_device_work)
@@ -166,7 +175,8 @@ init_ccw_bus_type (void)
166 if ((ret = bus_register (&ccw_bus_type))) 175 if ((ret = bus_register (&ccw_bus_type)))
167 goto out_err; 176 goto out_err;
168 177
169 if ((ret = driver_register(&io_subchannel_driver.drv))) 178 ret = css_driver_register(&io_subchannel_driver);
179 if (ret)
170 goto out_err; 180 goto out_err;
171 181
172 wait_event(ccw_device_init_wq, 182 wait_event(ccw_device_init_wq,
@@ -186,7 +196,7 @@ out_err:
186static void __exit 196static void __exit
187cleanup_ccw_bus_type (void) 197cleanup_ccw_bus_type (void)
188{ 198{
189 driver_unregister(&io_subchannel_driver.drv); 199 css_driver_unregister(&io_subchannel_driver);
190 bus_unregister(&ccw_bus_type); 200 bus_unregister(&ccw_bus_type);
191 destroy_workqueue(ccw_device_notify_work); 201 destroy_workqueue(ccw_device_notify_work);
192 destroy_workqueue(ccw_device_work); 202 destroy_workqueue(ccw_device_work);
@@ -773,7 +783,7 @@ static void sch_attach_device(struct subchannel *sch,
773{ 783{
774 css_update_ssd_info(sch); 784 css_update_ssd_info(sch);
775 spin_lock_irq(sch->lock); 785 spin_lock_irq(sch->lock);
776 sch->dev.driver_data = cdev; 786 sch_set_cdev(sch, cdev);
777 cdev->private->schid = sch->schid; 787 cdev->private->schid = sch->schid;
778 cdev->ccwlock = sch->lock; 788 cdev->ccwlock = sch->lock;
779 device_trigger_reprobe(sch); 789 device_trigger_reprobe(sch);
@@ -795,7 +805,7 @@ static void sch_attach_disconnected_device(struct subchannel *sch,
795 put_device(&other_sch->dev); 805 put_device(&other_sch->dev);
796 return; 806 return;
797 } 807 }
798 other_sch->dev.driver_data = NULL; 808 sch_set_cdev(other_sch, NULL);
799 /* No need to keep a subchannel without ccw device around. */ 809 /* No need to keep a subchannel without ccw device around. */
800 css_sch_device_unregister(other_sch); 810 css_sch_device_unregister(other_sch);
801 put_device(&other_sch->dev); 811 put_device(&other_sch->dev);
@@ -831,12 +841,12 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
831 return; 841 return;
832 } 842 }
833 spin_lock_irq(sch->lock); 843 spin_lock_irq(sch->lock);
834 sch->dev.driver_data = cdev; 844 sch_set_cdev(sch, cdev);
835 spin_unlock_irq(sch->lock); 845 spin_unlock_irq(sch->lock);
836 /* Start recognition for the new ccw device. */ 846 /* Start recognition for the new ccw device. */
837 if (io_subchannel_recog(cdev, sch)) { 847 if (io_subchannel_recog(cdev, sch)) {
838 spin_lock_irq(sch->lock); 848 spin_lock_irq(sch->lock);
839 sch->dev.driver_data = NULL; 849 sch_set_cdev(sch, NULL);
840 spin_unlock_irq(sch->lock); 850 spin_unlock_irq(sch->lock);
841 if (cdev->dev.release) 851 if (cdev->dev.release)
842 cdev->dev.release(&cdev->dev); 852 cdev->dev.release(&cdev->dev);
@@ -940,7 +950,7 @@ io_subchannel_register(struct work_struct *work)
940 cdev->private->dev_id.devno, ret); 950 cdev->private->dev_id.devno, ret);
941 put_device(&cdev->dev); 951 put_device(&cdev->dev);
942 spin_lock_irqsave(sch->lock, flags); 952 spin_lock_irqsave(sch->lock, flags);
943 sch->dev.driver_data = NULL; 953 sch_set_cdev(sch, NULL);
944 spin_unlock_irqrestore(sch->lock, flags); 954 spin_unlock_irqrestore(sch->lock, flags);
945 kfree (cdev->private); 955 kfree (cdev->private);
946 kfree (cdev); 956 kfree (cdev);
@@ -1022,7 +1032,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
1022 int rc; 1032 int rc;
1023 struct ccw_device_private *priv; 1033 struct ccw_device_private *priv;
1024 1034
1025 sch->dev.driver_data = cdev; 1035 sch_set_cdev(sch, cdev);
1026 sch->driver = &io_subchannel_driver; 1036 sch->driver = &io_subchannel_driver;
1027 cdev->ccwlock = sch->lock; 1037 cdev->ccwlock = sch->lock;
1028 1038
@@ -1082,7 +1092,7 @@ static void ccw_device_move_to_sch(struct work_struct *work)
1082 } 1092 }
1083 if (former_parent) { 1093 if (former_parent) {
1084 spin_lock_irq(former_parent->lock); 1094 spin_lock_irq(former_parent->lock);
1085 former_parent->dev.driver_data = NULL; 1095 sch_set_cdev(former_parent, NULL);
1086 spin_unlock_irq(former_parent->lock); 1096 spin_unlock_irq(former_parent->lock);
1087 css_sch_device_unregister(former_parent); 1097 css_sch_device_unregister(former_parent);
1088 /* Reset intparm to zeroes. */ 1098 /* Reset intparm to zeroes. */
@@ -1096,6 +1106,18 @@ out:
1096 put_device(&cdev->dev); 1106 put_device(&cdev->dev);
1097} 1107}
1098 1108
1109static void io_subchannel_irq(struct subchannel *sch)
1110{
1111 struct ccw_device *cdev;
1112
1113 cdev = sch_get_cdev(sch);
1114
1115 CIO_TRACE_EVENT(3, "IRQ");
1116 CIO_TRACE_EVENT(3, sch->dev.bus_id);
1117 if (cdev)
1118 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1119}
1120
1099static int 1121static int
1100io_subchannel_probe (struct subchannel *sch) 1122io_subchannel_probe (struct subchannel *sch)
1101{ 1123{
@@ -1104,13 +1126,13 @@ io_subchannel_probe (struct subchannel *sch)
1104 unsigned long flags; 1126 unsigned long flags;
1105 struct ccw_dev_id dev_id; 1127 struct ccw_dev_id dev_id;
1106 1128
1107 if (sch->dev.driver_data) { 1129 cdev = sch_get_cdev(sch);
1130 if (cdev) {
1108 /* 1131 /*
1109 * This subchannel already has an associated ccw_device. 1132 * This subchannel already has an associated ccw_device.
1110 * Register it and exit. This happens for all early 1133 * Register it and exit. This happens for all early
1111 * devices, e.g. the console. 1134
1112 */ 1135 */
1113 cdev = sch->dev.driver_data;
1114 cdev->dev.groups = ccwdev_attr_groups; 1136 cdev->dev.groups = ccwdev_attr_groups;
1115 device_initialize(&cdev->dev); 1137 device_initialize(&cdev->dev);
1116 ccw_device_register(cdev); 1138 ccw_device_register(cdev);
@@ -1132,6 +1154,11 @@ io_subchannel_probe (struct subchannel *sch)
1132 */ 1154 */
1133 dev_id.devno = sch->schib.pmcw.dev; 1155 dev_id.devno = sch->schib.pmcw.dev;
1134 dev_id.ssid = sch->schid.ssid; 1156 dev_id.ssid = sch->schid.ssid;
1157 /* Allocate I/O subchannel private data. */
1158 sch->private = kzalloc(sizeof(struct io_subchannel_private),
1159 GFP_KERNEL | GFP_DMA);
1160 if (!sch->private)
1161 return -ENOMEM;
1135 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1162 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
1136 if (!cdev) 1163 if (!cdev)
1137 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1164 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
@@ -1149,16 +1176,18 @@ io_subchannel_probe (struct subchannel *sch)
1149 return 0; 1176 return 0;
1150 } 1177 }
1151 cdev = io_subchannel_create_ccwdev(sch); 1178 cdev = io_subchannel_create_ccwdev(sch);
1152 if (IS_ERR(cdev)) 1179 if (IS_ERR(cdev)) {
1180 kfree(sch->private);
1153 return PTR_ERR(cdev); 1181 return PTR_ERR(cdev);
1154 1182 }
1155 rc = io_subchannel_recog(cdev, sch); 1183 rc = io_subchannel_recog(cdev, sch);
1156 if (rc) { 1184 if (rc) {
1157 spin_lock_irqsave(sch->lock, flags); 1185 spin_lock_irqsave(sch->lock, flags);
1158 sch->dev.driver_data = NULL; 1186 sch_set_cdev(sch, NULL);
1159 spin_unlock_irqrestore(sch->lock, flags); 1187 spin_unlock_irqrestore(sch->lock, flags);
1160 if (cdev->dev.release) 1188 if (cdev->dev.release)
1161 cdev->dev.release(&cdev->dev); 1189 cdev->dev.release(&cdev->dev);
1190 kfree(sch->private);
1162 } 1191 }
1163 1192
1164 return rc; 1193 return rc;
@@ -1170,25 +1199,25 @@ io_subchannel_remove (struct subchannel *sch)
1170 struct ccw_device *cdev; 1199 struct ccw_device *cdev;
1171 unsigned long flags; 1200 unsigned long flags;
1172 1201
1173 if (!sch->dev.driver_data) 1202 cdev = sch_get_cdev(sch);
1203 if (!cdev)
1174 return 0; 1204 return 0;
1175 cdev = sch->dev.driver_data;
1176 /* Set ccw device to not operational and drop reference. */ 1205 /* Set ccw device to not operational and drop reference. */
1177 spin_lock_irqsave(cdev->ccwlock, flags); 1206 spin_lock_irqsave(cdev->ccwlock, flags);
1178 sch->dev.driver_data = NULL; 1207 sch_set_cdev(sch, NULL);
1179 cdev->private->state = DEV_STATE_NOT_OPER; 1208 cdev->private->state = DEV_STATE_NOT_OPER;
1180 spin_unlock_irqrestore(cdev->ccwlock, flags); 1209 spin_unlock_irqrestore(cdev->ccwlock, flags);
1181 ccw_device_unregister(cdev); 1210 ccw_device_unregister(cdev);
1182 put_device(&cdev->dev); 1211 put_device(&cdev->dev);
1212 kfree(sch->private);
1183 return 0; 1213 return 0;
1184} 1214}
1185 1215
1186static int 1216static int io_subchannel_notify(struct subchannel *sch, int event)
1187io_subchannel_notify(struct device *dev, int event)
1188{ 1217{
1189 struct ccw_device *cdev; 1218 struct ccw_device *cdev;
1190 1219
1191 cdev = dev->driver_data; 1220 cdev = sch_get_cdev(sch);
1192 if (!cdev) 1221 if (!cdev)
1193 return 0; 1222 return 0;
1194 if (!cdev->drv) 1223 if (!cdev->drv)
@@ -1198,22 +1227,20 @@ io_subchannel_notify(struct device *dev, int event)
1198 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 1227 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
1199} 1228}
1200 1229
1201static void 1230static void io_subchannel_verify(struct subchannel *sch)
1202io_subchannel_verify(struct device *dev)
1203{ 1231{
1204 struct ccw_device *cdev; 1232 struct ccw_device *cdev;
1205 1233
1206 cdev = dev->driver_data; 1234 cdev = sch_get_cdev(sch);
1207 if (cdev) 1235 if (cdev)
1208 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1236 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1209} 1237}
1210 1238
1211static void 1239static void io_subchannel_ioterm(struct subchannel *sch)
1212io_subchannel_ioterm(struct device *dev)
1213{ 1240{
1214 struct ccw_device *cdev; 1241 struct ccw_device *cdev;
1215 1242
1216 cdev = dev->driver_data; 1243 cdev = sch_get_cdev(sch);
1217 if (!cdev) 1244 if (!cdev)
1218 return; 1245 return;
1219 /* Internal I/O will be retried by the interrupt handler. */ 1246 /* Internal I/O will be retried by the interrupt handler. */
@@ -1231,7 +1258,7 @@ io_subchannel_shutdown(struct subchannel *sch)
1231 struct ccw_device *cdev; 1258 struct ccw_device *cdev;
1232 int ret; 1259 int ret;
1233 1260
1234 cdev = sch->dev.driver_data; 1261 cdev = sch_get_cdev(sch);
1235 1262
1236 if (cio_is_console(sch->schid)) 1263 if (cio_is_console(sch->schid))
1237 return; 1264 return;
@@ -1271,6 +1298,9 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
1271{ 1298{
1272 int rc; 1299 int rc;
1273 1300
1301 /* Attach subchannel private data. */
1302 sch->private = cio_get_console_priv();
1303 memset(sch->private, 0, sizeof(struct io_subchannel_private));
1274 /* Initialize the ccw_device structure. */ 1304 /* Initialize the ccw_device structure. */
1275 cdev->dev.parent= &sch->dev; 1305 cdev->dev.parent= &sch->dev;
1276 rc = io_subchannel_recog(cdev, sch); 1306 rc = io_subchannel_recog(cdev, sch);
@@ -1456,6 +1486,7 @@ int ccw_driver_register(struct ccw_driver *cdriver)
1456 1486
1457 drv->bus = &ccw_bus_type; 1487 drv->bus = &ccw_bus_type;
1458 drv->name = cdriver->name; 1488 drv->name = cdriver->name;
1489 drv->owner = cdriver->owner;
1459 1490
1460 return driver_register(drv); 1491 return driver_register(drv);
1461} 1492}
@@ -1481,6 +1512,60 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
1481 return sch->schid; 1512 return sch->schid;
1482} 1513}
1483 1514
1515static int recovery_check(struct device *dev, void *data)
1516{
1517 struct ccw_device *cdev = to_ccwdev(dev);
1518 int *redo = data;
1519
1520 spin_lock_irq(cdev->ccwlock);
1521 switch (cdev->private->state) {
1522 case DEV_STATE_DISCONNECTED:
1523 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1524 cdev->private->dev_id.ssid,
1525 cdev->private->dev_id.devno);
1526 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1527 *redo = 1;
1528 break;
1529 case DEV_STATE_DISCONNECTED_SENSE_ID:
1530 *redo = 1;
1531 break;
1532 }
1533 spin_unlock_irq(cdev->ccwlock);
1534
1535 return 0;
1536}
1537
1538static void recovery_func(unsigned long data)
1539{
1540 int redo = 0;
1541
1542 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1543 if (redo) {
1544 spin_lock_irq(&recovery_lock);
1545 if (!timer_pending(&recovery_timer)) {
1546 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1547 recovery_phase++;
1548 mod_timer(&recovery_timer, jiffies +
1549 recovery_delay[recovery_phase] * HZ);
1550 }
1551 spin_unlock_irq(&recovery_lock);
1552 } else
1553 CIO_MSG_EVENT(2, "recovery: end\n");
1554}
1555
1556void ccw_device_schedule_recovery(void)
1557{
1558 unsigned long flags;
1559
1560 CIO_MSG_EVENT(2, "recovery: schedule\n");
1561 spin_lock_irqsave(&recovery_lock, flags);
1562 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1563 recovery_phase = 0;
1564 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1565 }
1566 spin_unlock_irqrestore(&recovery_lock, flags);
1567}
1568
1484MODULE_LICENSE("GPL"); 1569MODULE_LICENSE("GPL");
1485EXPORT_SYMBOL(ccw_device_set_online); 1570EXPORT_SYMBOL(ccw_device_set_online);
1486EXPORT_SYMBOL(ccw_device_set_offline); 1571EXPORT_SYMBOL(ccw_device_set_offline);
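One note on the cadence implied by the recovery code above: ccw_device_schedule_recovery() resets recovery_phase and arms the timer at the shortest delay, and each pass of recovery_func() that still finds disconnected devices re-arms it with the next entry of recovery_delay[], capping at the last one. A rough timeline sketch (assuming no intervening reschedule):

	/* recovery_delay[] = { 3, 30, 300 } seconds */
	ccw_device_schedule_recovery();	/* phase 0: first recovery pass ~3s later        */
					/* still disconnected: phase 1, next pass ~30s   */
					/* still disconnected: phase 2, then every ~300s */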
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 0d4089600439..d40a2ffaa000 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -5,6 +5,8 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/wait.h> 6#include <linux/wait.h>
7 7
8#include "io_sch.h"
9
8/* 10/*
9 * states of the device statemachine 11 * states of the device statemachine
10 */ 12 */
@@ -74,7 +76,6 @@ extern struct workqueue_struct *ccw_device_notify_work;
74extern wait_queue_head_t ccw_device_init_wq; 76extern wait_queue_head_t ccw_device_init_wq;
75extern atomic_t ccw_device_init_count; 77extern atomic_t ccw_device_init_count;
76 78
77void io_subchannel_irq (struct device *pdev);
78void io_subchannel_recog_done(struct ccw_device *cdev); 79void io_subchannel_recog_done(struct ccw_device *cdev);
79 80
80int ccw_device_cancel_halt_clear(struct ccw_device *); 81int ccw_device_cancel_halt_clear(struct ccw_device *);
@@ -87,6 +88,8 @@ int ccw_device_recognition(struct ccw_device *);
87int ccw_device_online(struct ccw_device *); 88int ccw_device_online(struct ccw_device *);
88int ccw_device_offline(struct ccw_device *); 89int ccw_device_offline(struct ccw_device *);
89 90
91void ccw_device_schedule_recovery(void);
92
90/* Function prototypes for device status and basic sense stuff. */ 93/* Function prototypes for device status and basic sense stuff. */
91void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 94void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
92void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); 95void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index bfad421cda66..4b92c84fb438 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -25,14 +25,16 @@
25#include "ioasm.h" 25#include "ioasm.h"
26#include "chp.h" 26#include "chp.h"
27 27
28static int timeout_log_enabled;
29
28int 30int
29device_is_online(struct subchannel *sch) 31device_is_online(struct subchannel *sch)
30{ 32{
31 struct ccw_device *cdev; 33 struct ccw_device *cdev;
32 34
33 if (!sch->dev.driver_data) 35 cdev = sch_get_cdev(sch);
36 if (!cdev)
34 return 0; 37 return 0;
35 cdev = sch->dev.driver_data;
36 return (cdev->private->state == DEV_STATE_ONLINE); 38 return (cdev->private->state == DEV_STATE_ONLINE);
37} 39}
38 40
@@ -41,9 +43,9 @@ device_is_disconnected(struct subchannel *sch)
41{ 43{
42 struct ccw_device *cdev; 44 struct ccw_device *cdev;
43 45
44 if (!sch->dev.driver_data) 46 cdev = sch_get_cdev(sch);
47 if (!cdev)
45 return 0; 48 return 0;
46 cdev = sch->dev.driver_data;
47 return (cdev->private->state == DEV_STATE_DISCONNECTED || 49 return (cdev->private->state == DEV_STATE_DISCONNECTED ||
48 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); 50 cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
49} 51}
@@ -53,19 +55,21 @@ device_set_disconnected(struct subchannel *sch)
53{ 55{
54 struct ccw_device *cdev; 56 struct ccw_device *cdev;
55 57
56 if (!sch->dev.driver_data) 58 cdev = sch_get_cdev(sch);
59 if (!cdev)
57 return; 60 return;
58 cdev = sch->dev.driver_data;
59 ccw_device_set_timeout(cdev, 0); 61 ccw_device_set_timeout(cdev, 0);
60 cdev->private->flags.fake_irb = 0; 62 cdev->private->flags.fake_irb = 0;
61 cdev->private->state = DEV_STATE_DISCONNECTED; 63 cdev->private->state = DEV_STATE_DISCONNECTED;
64 if (cdev->online)
65 ccw_device_schedule_recovery();
62} 66}
63 67
64void device_set_intretry(struct subchannel *sch) 68void device_set_intretry(struct subchannel *sch)
65{ 69{
66 struct ccw_device *cdev; 70 struct ccw_device *cdev;
67 71
68 cdev = sch->dev.driver_data; 72 cdev = sch_get_cdev(sch);
69 if (!cdev) 73 if (!cdev)
70 return; 74 return;
71 cdev->private->flags.intretry = 1; 75 cdev->private->flags.intretry = 1;
@@ -75,13 +79,62 @@ int device_trigger_verify(struct subchannel *sch)
75{ 79{
76 struct ccw_device *cdev; 80 struct ccw_device *cdev;
77 81
78 cdev = sch->dev.driver_data; 82 cdev = sch_get_cdev(sch);
79 if (!cdev || !cdev->online) 83 if (!cdev || !cdev->online)
80 return -EINVAL; 84 return -EINVAL;
81 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 85 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
82 return 0; 86 return 0;
83} 87}
84 88
89static int __init ccw_timeout_log_setup(char *unused)
90{
91 timeout_log_enabled = 1;
92 return 1;
93}
94
95__setup("ccw_timeout_log", ccw_timeout_log_setup);
96
97static void ccw_timeout_log(struct ccw_device *cdev)
98{
99 struct schib schib;
100 struct subchannel *sch;
101 struct io_subchannel_private *private;
102 int cc;
103
104 sch = to_subchannel(cdev->dev.parent);
105 private = to_io_private(sch);
106 cc = stsch(sch->schid, &schib);
107
108 printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
109 "device information:\n", get_clock());
110 printk(KERN_WARNING "cio: orb:\n");
111 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
112 &private->orb, sizeof(private->orb), 0);
113 printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
114 printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
115 printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
116 "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
117
118 if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
119 (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
120 printk(KERN_WARNING "cio: last channel program (intern):\n");
121 else
122 printk(KERN_WARNING "cio: last channel program:\n");
123
124 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
125 (void *)(addr_t)private->orb.cpa,
126 sizeof(struct ccw1), 0);
127 printk(KERN_WARNING "cio: ccw device state: %d\n",
128 cdev->private->state);
129 printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
130 printk(KERN_WARNING "cio: schib:\n");
131 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
132 &schib, sizeof(schib), 0);
133 printk(KERN_WARNING "cio: ccw device flags:\n");
134 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
135 &cdev->private->flags, sizeof(cdev->private->flags), 0);
136}
137
85/* 138/*
86 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT. 139 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
87 */ 140 */
@@ -92,6 +145,8 @@ ccw_device_timeout(unsigned long data)
92 145
93 cdev = (struct ccw_device *) data; 146 cdev = (struct ccw_device *) data;
94 spin_lock_irq(cdev->ccwlock); 147 spin_lock_irq(cdev->ccwlock);
148 if (timeout_log_enabled)
149 ccw_timeout_log(cdev);
95 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT); 150 dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
96 spin_unlock_irq(cdev->ccwlock); 151 spin_unlock_irq(cdev->ccwlock);
97} 152}
@@ -122,9 +177,9 @@ device_kill_pending_timer(struct subchannel *sch)
122{ 177{
123 struct ccw_device *cdev; 178 struct ccw_device *cdev;
124 179
125 if (!sch->dev.driver_data) 180 cdev = sch_get_cdev(sch);
181 if (!cdev)
126 return; 182 return;
127 cdev = sch->dev.driver_data;
128 ccw_device_set_timeout(cdev, 0); 183 ccw_device_set_timeout(cdev, 0);
129} 184}
130 185
@@ -268,7 +323,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
268 switch (state) { 323 switch (state) {
269 case DEV_STATE_NOT_OPER: 324 case DEV_STATE_NOT_OPER:
270 CIO_DEBUG(KERN_WARNING, 2, 325 CIO_DEBUG(KERN_WARNING, 2,
271 "cio: SenseID : unknown device %04x on subchannel " 326 "SenseID : unknown device %04x on subchannel "
272 "0.%x.%04x\n", cdev->private->dev_id.devno, 327 "0.%x.%04x\n", cdev->private->dev_id.devno,
273 sch->schid.ssid, sch->schid.sch_no); 328 sch->schid.ssid, sch->schid.sch_no);
274 break; 329 break;
@@ -294,7 +349,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
294 } 349 }
295 /* Issue device info message. */ 350 /* Issue device info message. */
296 CIO_DEBUG(KERN_INFO, 2, 351 CIO_DEBUG(KERN_INFO, 2,
297 "cio: SenseID : device 0.%x.%04x reports: " 352 "SenseID : device 0.%x.%04x reports: "
298 "CU Type/Mod = %04X/%02X, Dev Type/Mod = " 353 "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
299 "%04X/%02X\n", 354 "%04X/%02X\n",
300 cdev->private->dev_id.ssid, 355 cdev->private->dev_id.ssid,
@@ -304,7 +359,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
304 break; 359 break;
305 case DEV_STATE_BOXED: 360 case DEV_STATE_BOXED:
306 CIO_DEBUG(KERN_WARNING, 2, 361 CIO_DEBUG(KERN_WARNING, 2,
307 "cio: SenseID : boxed device %04x on subchannel " 362 "SenseID : boxed device %04x on subchannel "
308 "0.%x.%04x\n", cdev->private->dev_id.devno, 363 "0.%x.%04x\n", cdev->private->dev_id.devno,
309 sch->schid.ssid, sch->schid.sch_no); 364 sch->schid.ssid, sch->schid.sch_no);
310 break; 365 break;
@@ -349,7 +404,7 @@ ccw_device_oper_notify(struct work_struct *work)
349 sch = to_subchannel(cdev->dev.parent); 404 sch = to_subchannel(cdev->dev.parent);
350 if (sch->driver && sch->driver->notify) { 405 if (sch->driver && sch->driver->notify) {
351 spin_unlock_irqrestore(cdev->ccwlock, flags); 406 spin_unlock_irqrestore(cdev->ccwlock, flags);
352 ret = sch->driver->notify(&sch->dev, CIO_OPER); 407 ret = sch->driver->notify(sch, CIO_OPER);
353 spin_lock_irqsave(cdev->ccwlock, flags); 408 spin_lock_irqsave(cdev->ccwlock, flags);
354 } else 409 } else
355 ret = 0; 410 ret = 0;
@@ -389,7 +444,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
389 444
390 if (state == DEV_STATE_BOXED) 445 if (state == DEV_STATE_BOXED)
391 CIO_DEBUG(KERN_WARNING, 2, 446 CIO_DEBUG(KERN_WARNING, 2,
392 "cio: Boxed device %04x on subchannel %04x\n", 447 "Boxed device %04x on subchannel %04x\n",
393 cdev->private->dev_id.devno, sch->schid.sch_no); 448 cdev->private->dev_id.devno, sch->schid.sch_no);
394 449
395 if (cdev->private->flags.donotify) { 450 if (cdev->private->flags.donotify) {
@@ -500,7 +555,8 @@ ccw_device_recognition(struct ccw_device *cdev)
500 (cdev->private->state != DEV_STATE_BOXED)) 555 (cdev->private->state != DEV_STATE_BOXED))
501 return -EINVAL; 556 return -EINVAL;
502 sch = to_subchannel(cdev->dev.parent); 557 sch = to_subchannel(cdev->dev.parent);
503 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); 558 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
559 (u32)(addr_t)sch);
504 if (ret != 0) 560 if (ret != 0)
505 /* Couldn't enable the subchannel for i/o. Sick device. */ 561 /* Couldn't enable the subchannel for i/o. Sick device. */
506 return ret; 562 return ret;
@@ -587,9 +643,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
587 default: 643 default:
588 /* Reset oper notify indication after verify error. */ 644 /* Reset oper notify indication after verify error. */
589 cdev->private->flags.donotify = 0; 645 cdev->private->flags.donotify = 0;
590 if (cdev->online) 646 if (cdev->online) {
647 ccw_device_set_timeout(cdev, 0);
591 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 648 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
592 else 649 } else
593 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 650 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
594 break; 651 break;
595 } 652 }
@@ -610,7 +667,8 @@ ccw_device_online(struct ccw_device *cdev)
610 sch = to_subchannel(cdev->dev.parent); 667 sch = to_subchannel(cdev->dev.parent);
611 if (css_init_done && !get_device(&cdev->dev)) 668 if (css_init_done && !get_device(&cdev->dev))
612 return -ENODEV; 669 return -ENODEV;
613 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc); 670 ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
671 (u32)(addr_t)sch);
614 if (ret != 0) { 672 if (ret != 0) {
615 /* Couldn't enable the subchannel for i/o. Sick device. */ 673 /* Couldn't enable the subchannel for i/o. Sick device. */
616 if (ret == -ENODEV) 674 if (ret == -ENODEV)
@@ -937,7 +995,7 @@ void device_kill_io(struct subchannel *sch)
937 int ret; 995 int ret;
938 struct ccw_device *cdev; 996 struct ccw_device *cdev;
939 997
940 cdev = sch->dev.driver_data; 998 cdev = sch_get_cdev(sch);
941 ret = ccw_device_cancel_halt_clear(cdev); 999 ret = ccw_device_cancel_halt_clear(cdev);
942 if (ret == -EBUSY) { 1000 if (ret == -EBUSY) {
943 ccw_device_set_timeout(cdev, 3*HZ); 1001 ccw_device_set_timeout(cdev, 3*HZ);
@@ -990,7 +1048,8 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
990 struct subchannel *sch; 1048 struct subchannel *sch;
991 1049
992 sch = to_subchannel(cdev->dev.parent); 1050 sch = to_subchannel(cdev->dev.parent);
993 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0) 1051 if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
1052 (u32)(addr_t)sch) != 0)
994 /* Couldn't enable the subchannel for i/o. Sick device. */ 1053 /* Couldn't enable the subchannel for i/o. Sick device. */
995 return; 1054 return;
996 1055
@@ -1006,9 +1065,9 @@ device_trigger_reprobe(struct subchannel *sch)
1006{ 1065{
1007 struct ccw_device *cdev; 1066 struct ccw_device *cdev;
1008 1067
1009 if (!sch->dev.driver_data) 1068 cdev = sch_get_cdev(sch);
1069 if (!cdev)
1010 return; 1070 return;
1011 cdev = sch->dev.driver_data;
1012 if (cdev->private->state != DEV_STATE_DISCONNECTED) 1071 if (cdev->private->state != DEV_STATE_DISCONNECTED)
1013 return; 1072 return;
1014 1073
@@ -1028,7 +1087,7 @@ device_trigger_reprobe(struct subchannel *sch)
1028 sch->schib.pmcw.ena = 0; 1087 sch->schib.pmcw.ena = 0;
1029 if ((sch->lpm & (sch->lpm - 1)) != 0) 1088 if ((sch->lpm & (sch->lpm - 1)) != 0)
1030 sch->schib.pmcw.mp = 1; 1089 sch->schib.pmcw.mp = 1;
1031 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 1090 sch->schib.pmcw.intparm = (u32)(addr_t)sch;
1032 /* We should also update ssd info, but this has to wait. */ 1091
1033 /* Check if this is another device which appeared on the same sch. */ 1092 /* Check if this is another device which appeared on the same sch. */
1034 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 1093 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
@@ -1223,21 +1282,4 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1223 }, 1282 },
1224}; 1283};
1225 1284
1226/*
1227 * io_subchannel_irq is called for "real" interrupts or for status
1228 * pending conditions on msch.
1229 */
1230void
1231io_subchannel_irq (struct device *pdev)
1232{
1233 struct ccw_device *cdev;
1234
1235 cdev = to_subchannel(pdev)->dev.driver_data;
1236
1237 CIO_TRACE_EVENT (3, "IRQ");
1238 CIO_TRACE_EVENT (3, pdev->bus_id);
1239 if (cdev)
1240 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1241}
1242
1243EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1285EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
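The timeout logging added above is off by default: the __setup() hook merely sets timeout_log_enabled, so the dump in ccw_timeout_log() is emitted from ccw_device_timeout() only when the argument-less flag is present on the kernel command line, e.g. (illustrative, not part of this patch):

	<existing kernel parameters> ccw_timeout_log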
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 156f3f9786b5..918b8b89cf9a 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -24,6 +24,7 @@
24#include "css.h" 24#include "css.h"
25#include "device.h" 25#include "device.h"
26#include "ioasm.h" 26#include "ioasm.h"
27#include "io_sch.h"
27 28
28/* 29/*
29 * Input : 30 * Input :
@@ -219,11 +220,13 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
219 return -EAGAIN; 220 return -EAGAIN;
220 } 221 }
221 if (irb->scsw.cc == 3) { 222 if (irb->scsw.cc == 3) {
222 if ((sch->orb.lpm & 223 u8 lpm;
223 sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 224
225 lpm = to_io_private(sch)->orb.lpm;
226 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0)
224 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x " 227 CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x "
225 "on subchannel 0.%x.%04x is " 228 "on subchannel 0.%x.%04x is "
226 "'not operational'\n", sch->orb.lpm, 229 "'not operational'\n", lpm,
227 cdev->private->dev_id.devno, 230 cdev->private->dev_id.devno,
228 sch->schid.ssid, sch->schid.sch_no); 231 sch->schid.ssid, sch->schid.sch_no);
229 return -EACCES; 232 return -EACCES;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 7fd2dadc3297..49b58eb0fab8 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -501,7 +501,7 @@ ccw_device_stlck(struct ccw_device *cdev)
501 return -ENOMEM; 501 return -ENOMEM;
502 } 502 }
503 spin_lock_irqsave(sch->lock, flags); 503 spin_lock_irqsave(sch->lock, flags);
504 ret = cio_enable_subchannel(sch, 3); 504 ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);
505 if (ret) 505 if (ret)
506 goto out_unlock; 506 goto out_unlock;
507 /* 507 /*
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index cb1879a96818..c52449a1f9fc 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -22,6 +22,7 @@
22#include "css.h" 22#include "css.h"
23#include "device.h" 23#include "device.h"
24#include "ioasm.h" 24#include "ioasm.h"
25#include "io_sch.h"
25 26
26/* 27/*
27 * Helper function called from interrupt context to decide whether an 28 * Helper function called from interrupt context to decide whether an
@@ -155,10 +156,13 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev)
155 return -EAGAIN; 156 return -EAGAIN;
156 } 157 }
157 if (irb->scsw.cc == 3) { 158 if (irb->scsw.cc == 3) {
159 u8 lpm;
160
161 lpm = to_io_private(sch)->orb.lpm;
158 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x," 162 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x,"
159 " lpm %02X, became 'not operational'\n", 163 " lpm %02X, became 'not operational'\n",
160 cdev->private->dev_id.devno, sch->schid.ssid, 164 cdev->private->dev_id.devno, sch->schid.ssid,
161 sch->schid.sch_no, sch->orb.lpm); 165 sch->schid.sch_no, lpm);
162 return -EACCES; 166 return -EACCES;
163 } 167 }
164 i = 8 - ffs(cdev->private->imask); 168 i = 8 - ffs(cdev->private->imask);
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index aa96e6752592..ebe0848cfe33 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -20,6 +20,7 @@
20#include "css.h" 20#include "css.h"
21#include "device.h" 21#include "device.h"
22#include "ioasm.h" 22#include "ioasm.h"
23#include "io_sch.h"
23 24
24/* 25/*
25 * Check for any kind of channel or interface control check but don't 26 * Check for any kind of channel or interface control check but don't
@@ -310,6 +311,7 @@ int
310ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) 311ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
311{ 312{
312 struct subchannel *sch; 313 struct subchannel *sch;
314 struct ccw1 *sense_ccw;
313 315
314 sch = to_subchannel(cdev->dev.parent); 316 sch = to_subchannel(cdev->dev.parent);
315 317
@@ -326,15 +328,16 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
326 /* 328 /*
327 * We have ending status but no sense information. Do a basic sense. 329 * We have ending status but no sense information. Do a basic sense.
328 */ 330 */
329 sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; 331 sense_ccw = &to_io_private(sch)->sense_ccw;
330 sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); 332 sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
331 sch->sense_ccw.count = SENSE_MAX_COUNT; 333 sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
332 sch->sense_ccw.flags = CCW_FLAG_SLI; 334 sense_ccw->count = SENSE_MAX_COUNT;
335 sense_ccw->flags = CCW_FLAG_SLI;
333 336
334 /* Reset internal retry indication. */ 337 /* Reset internal retry indication. */
335 cdev->private->flags.intretry = 0; 338 cdev->private->flags.intretry = 0;
336 339
337 return cio_start (sch, &sch->sense_ccw, 0xff); 340 return cio_start(sch, sense_ccw, 0xff);
338} 341}
339 342
340/* 343/*
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
new file mode 100644
index 000000000000..8c613160bfce
--- /dev/null
+++ b/drivers/s390/cio/io_sch.h
@@ -0,0 +1,163 @@
1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H
3
4#include "schid.h"
5
6/*
7 * operation request block
8 */
9struct orb {
10 u32 intparm; /* interruption parameter */
11 u32 key : 4; /* flags, like key, suspend control, etc. */
12 u32 spnd : 1; /* suspend control */
13 u32 res1 : 1; /* reserved */
14 u32 mod : 1; /* modification control */
15 u32 sync : 1; /* synchronize control */
16 u32 fmt : 1; /* format control */
17 u32 pfch : 1; /* prefetch control */
18 u32 isic : 1; /* initial-status-interruption control */
19 u32 alcc : 1; /* address-limit-checking control */
20 u32 ssic : 1; /* suppress-suspended-interr. control */
21 u32 res2 : 1; /* reserved */
22 u32 c64 : 1; /* IDAW/QDIO 64 bit control */
23 u32 i2k : 1; /* IDAW 2/4kB block size control */
24 u32 lpm : 8; /* logical path mask */
25 u32 ils : 1; /* incorrect length */
26 u32 zero : 6; /* reserved zeros */
27 u32 orbx : 1; /* ORB extension control */
28 u32 cpa; /* channel program address */
29} __attribute__ ((packed, aligned(4)));
30
31struct io_subchannel_private {
32 struct orb orb; /* operation request block */
33 struct ccw1 sense_ccw; /* static ccw for sense command */
34} __attribute__ ((aligned(8)));
35
36#define to_io_private(n) ((struct io_subchannel_private *)n->private)
37#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
38#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
39
40#define MAX_CIWS 8
41
42/*
43 * sense-id response buffer layout
44 */
45struct senseid {
46 /* common part */
47 u8 reserved; /* always 0x'FF' */
48 u16 cu_type; /* control unit type */
49 u8 cu_model; /* control unit model */
50 u16 dev_type; /* device type */
51 u8 dev_model; /* device model */
52 u8 unused; /* padding byte */
53 /* extended part */
54 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
55} __attribute__ ((packed, aligned(4)));
56
57struct ccw_device_private {
58 struct ccw_device *cdev;
59 struct subchannel *sch;
60 int state; /* device state */
61 atomic_t onoff;
62 unsigned long registered;
63 struct ccw_dev_id dev_id; /* device id */
64 struct subchannel_id schid; /* subchannel number */
65 u8 imask; /* lpm mask for SNID/SID/SPGID */
66 int iretry; /* retry counter SNID/SID/SPGID */
67 struct {
68 unsigned int fast:1; /* post with "channel end" */
69 unsigned int repall:1; /* report every interrupt status */
70 unsigned int pgroup:1; /* do path grouping */
71 unsigned int force:1; /* allow forced online */
72 } __attribute__ ((packed)) options;
73 struct {
74 unsigned int pgid_single:1; /* use single path for Set PGID */
75 unsigned int esid:1; /* Ext. SenseID supported by HW */
76 unsigned int dosense:1; /* delayed SENSE required */
77 unsigned int doverify:1; /* delayed path verification */
78 unsigned int donotify:1; /* call notify function */
79 unsigned int recog_done:1; /* dev. recog. complete */
80 unsigned int fake_irb:1; /* deliver faked irb */
81 unsigned int intretry:1; /* retry internal operation */
82 } __attribute__((packed)) flags;
83 unsigned long intparm; /* user interruption parameter */
84 struct qdio_irq *qdio_data;
85 struct irb irb; /* device status */
86 struct senseid senseid; /* SenseID info */
87 struct pgid pgid[8]; /* path group IDs per chpid*/
88 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
89 struct work_struct kick_work;
90 wait_queue_head_t wait_q;
91 struct timer_list timer;
92 void *cmb; /* measurement information */
93 struct list_head cmb_list; /* list of measured devices */
94 u64 cmb_start_time; /* clock value of cmb reset */
95 void *cmb_wait; /* deferred cmb enable/disable */
96};
97
98static inline int ssch(struct subchannel_id schid, volatile struct orb *addr)
99{
100 register struct subchannel_id reg1 asm("1") = schid;
101 int ccode;
102
103 asm volatile(
104 " ssch 0(%2)\n"
105 " ipm %0\n"
106 " srl %0,28"
107 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
108 return ccode;
109}
110
111static inline int rsch(struct subchannel_id schid)
112{
113 register struct subchannel_id reg1 asm("1") = schid;
114 int ccode;
115
116 asm volatile(
117 " rsch\n"
118 " ipm %0\n"
119 " srl %0,28"
120 : "=d" (ccode) : "d" (reg1) : "cc");
121 return ccode;
122}
123
124static inline int csch(struct subchannel_id schid)
125{
126 register struct subchannel_id reg1 asm("1") = schid;
127 int ccode;
128
129 asm volatile(
130 " csch\n"
131 " ipm %0\n"
132 " srl %0,28"
133 : "=d" (ccode) : "d" (reg1) : "cc");
134 return ccode;
135}
136
137static inline int hsch(struct subchannel_id schid)
138{
139 register struct subchannel_id reg1 asm("1") = schid;
140 int ccode;
141
142 asm volatile(
143 " hsch\n"
144 " ipm %0\n"
145 " srl %0,28"
146 : "=d" (ccode) : "d" (reg1) : "cc");
147 return ccode;
148}
149
150static inline int xsch(struct subchannel_id schid)
151{
152 register struct subchannel_id reg1 asm("1") = schid;
153 int ccode;
154
155 asm volatile(
156 " .insn rre,0xb2760000,%1,0\n"
157 " ipm %0\n"
158 " srl %0,28"
159 : "=d" (ccode) : "d" (reg1) : "cc");
160 return ccode;
161}
162
163#endif
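A short sketch of how the new io_sch.h accessors are meant to be used from subchannel context, mirroring the device_status.c and device_id.c hunks in this patch (example_prepare_sense() is a hypothetical helper; error handling omitted):

	static void example_prepare_sense(struct subchannel *sch)
	{
		struct io_subchannel_private *priv = to_io_private(sch);
		struct ccw_device *cdev = sch_get_cdev(sch);

		if (!cdev)
			return;
		/* The ORB and the static sense CCW now live in the per-subchannel
		 * private data instead of in struct subchannel itself. */
		priv->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE;
		priv->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw);
		priv->sense_ccw.count = SENSE_MAX_COUNT;
		priv->sense_ccw.flags = CCW_FLAG_SLI;
	}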
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 7153dd959082..652ea3625f9d 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -109,72 +109,6 @@ static inline int tpi( volatile struct tpi_info *addr)
109 return ccode; 109 return ccode;
110} 110}
111 111
112static inline int ssch(struct subchannel_id schid,
113 volatile struct orb *addr)
114{
115 register struct subchannel_id reg1 asm ("1") = schid;
116 int ccode;
117
118 asm volatile(
119 " ssch 0(%2)\n"
120 " ipm %0\n"
121 " srl %0,28"
122 : "=d" (ccode) : "d" (reg1), "a" (addr), "m" (*addr) : "cc");
123 return ccode;
124}
125
126static inline int rsch(struct subchannel_id schid)
127{
128 register struct subchannel_id reg1 asm ("1") = schid;
129 int ccode;
130
131 asm volatile(
132 " rsch\n"
133 " ipm %0\n"
134 " srl %0,28"
135 : "=d" (ccode) : "d" (reg1) : "cc");
136 return ccode;
137}
138
139static inline int csch(struct subchannel_id schid)
140{
141 register struct subchannel_id reg1 asm ("1") = schid;
142 int ccode;
143
144 asm volatile(
145 " csch\n"
146 " ipm %0\n"
147 " srl %0,28"
148 : "=d" (ccode) : "d" (reg1) : "cc");
149 return ccode;
150}
151
152static inline int hsch(struct subchannel_id schid)
153{
154 register struct subchannel_id reg1 asm ("1") = schid;
155 int ccode;
156
157 asm volatile(
158 " hsch\n"
159 " ipm %0\n"
160 " srl %0,28"
161 : "=d" (ccode) : "d" (reg1) : "cc");
162 return ccode;
163}
164
165static inline int xsch(struct subchannel_id schid)
166{
167 register struct subchannel_id reg1 asm ("1") = schid;
168 int ccode;
169
170 asm volatile(
171 " .insn rre,0xb2760000,%1,0\n"
172 " ipm %0\n"
173 " srl %0,28"
174 : "=d" (ccode) : "d" (reg1) : "cc");
175 return ccode;
176}
177
178static inline int chsc(void *chsc_area) 112static inline int chsc(void *chsc_area)
179{ 113{
180 typedef struct { char _[4096]; } addr_type; 114 typedef struct { char _[4096]; } addr_type;
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 40a3208c7cf3..e2a781b6b21d 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -48,11 +48,11 @@
48#include <asm/debug.h> 48#include <asm/debug.h>
49#include <asm/s390_rdev.h> 49#include <asm/s390_rdev.h>
50#include <asm/qdio.h> 50#include <asm/qdio.h>
51#include <asm/airq.h>
51 52
52#include "cio.h" 53#include "cio.h"
53#include "css.h" 54#include "css.h"
54#include "device.h" 55#include "device.h"
55#include "airq.h"
56#include "qdio.h" 56#include "qdio.h"
57#include "ioasm.h" 57#include "ioasm.h"
58#include "chsc.h" 58#include "chsc.h"
@@ -96,7 +96,7 @@ static debug_info_t *qdio_dbf_slsb_in;
96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change 96static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */ 97 during a while loop */
98static DEFINE_SPINLOCK(ttiq_list_lock); 98static DEFINE_SPINLOCK(ttiq_list_lock);
99static int register_thinint_result; 99static void *tiqdio_ind;
100static void tiqdio_tl(unsigned long); 100static void tiqdio_tl(unsigned long);
101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); 101static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102 102
@@ -399,7 +399,7 @@ qdio_get_indicator(void)
399{ 399{
400 int i; 400 int i;
401 401
402 for (i=1;i<INDICATORS_PER_CACHELINE;i++) 402 for (i = 0; i < INDICATORS_PER_CACHELINE; i++)
403 if (!indicator_used[i]) { 403 if (!indicator_used[i]) {
404 indicator_used[i]=1; 404 indicator_used[i]=1;
405 return indicators+i; 405 return indicators+i;
@@ -1408,8 +1408,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1408 if (q->hydra_gives_outbound_pcis) { 1408 if (q->hydra_gives_outbound_pcis) {
1409 if (!q->siga_sync_done_on_thinints) { 1409 if (!q->siga_sync_done_on_thinints) {
1410 SYNC_MEMORY_ALL; 1410 SYNC_MEMORY_ALL;
1411 } else if ((!q->siga_sync_done_on_outb_tis)&& 1411 } else if (!q->siga_sync_done_on_outb_tis) {
1412 (q->hydra_gives_outbound_pcis)) {
1413 SYNC_MEMORY_ALL_OUTB; 1412 SYNC_MEMORY_ALL_OUTB;
1414 } 1413 }
1415 } else { 1414 } else {
@@ -1911,8 +1910,7 @@ qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1911 } 1910 }
1912} 1911}
1913 1912
1914static int 1913static void tiqdio_thinint_handler(void *ind, void *drv_data)
1915tiqdio_thinint_handler(void)
1916{ 1914{
1917 QDIO_DBF_TEXT4(0,trace,"thin_int"); 1915 QDIO_DBF_TEXT4(0,trace,"thin_int");
1918 1916
@@ -1925,7 +1923,6 @@ tiqdio_thinint_handler(void)
1925 tiqdio_clear_global_summary(); 1923 tiqdio_clear_global_summary();
1926 1924
1927 tiqdio_inbound_checks(); 1925 tiqdio_inbound_checks();
1928 return 0;
1929} 1926}
1930 1927
1931static void 1928static void
@@ -2445,7 +2442,7 @@ tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2445 real_addr_dev_st_chg_ind=0; 2442 real_addr_dev_st_chg_ind=0;
2446 } else { 2443 } else {
2447 real_addr_local_summary_bit= 2444 real_addr_local_summary_bit=
2448 virt_to_phys((volatile void *)indicators); 2445 virt_to_phys((volatile void *)tiqdio_ind);
2449 real_addr_dev_st_chg_ind= 2446 real_addr_dev_st_chg_ind=
2450 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); 2447 virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2451 } 2448 }
@@ -3740,23 +3737,25 @@ static void
3740tiqdio_register_thinints(void) 3737tiqdio_register_thinints(void)
3741{ 3738{
3742 char dbf_text[20]; 3739 char dbf_text[20];
3743 register_thinint_result= 3740
3744 s390_register_adapter_interrupt(&tiqdio_thinint_handler); 3741 tiqdio_ind =
3745 if (register_thinint_result) { 3742 s390_register_adapter_interrupt(&tiqdio_thinint_handler, NULL);
3746 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff)); 3743 if (IS_ERR(tiqdio_ind)) {
3744 sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_ind));
3747 QDIO_DBF_TEXT0(0,setup,dbf_text); 3745 QDIO_DBF_TEXT0(0,setup,dbf_text);
3748 QDIO_PRINT_ERR("failed to register adapter handler " \ 3746 QDIO_PRINT_ERR("failed to register adapter handler " \
3749 "(rc=%i).\nAdapter interrupts might " \ 3747 "(rc=%li).\nAdapter interrupts might " \
3750 "not work. Continuing.\n", 3748 "not work. Continuing.\n",
3751 register_thinint_result); 3749 PTR_ERR(tiqdio_ind));
3750 tiqdio_ind = NULL;
3752 } 3751 }
3753} 3752}
3754 3753
3755static void 3754static void
3756tiqdio_unregister_thinints(void) 3755tiqdio_unregister_thinints(void)
3757{ 3756{
3758 if (!register_thinint_result) 3757 if (tiqdio_ind)
3759 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler); 3758 s390_unregister_adapter_interrupt(tiqdio_ind);
3760} 3759}
3761 3760
3762static int 3761static int
@@ -3768,8 +3767,8 @@ qdio_get_qdio_memory(void)
3768 for (i=1;i<INDICATORS_PER_CACHELINE;i++) 3767 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3769 indicator_used[i]=0; 3768 indicator_used[i]=0;
3770 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), 3769 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3771 GFP_KERNEL); 3770 GFP_KERNEL);
3772 if (!indicators) 3771 if (!indicators)
3773 return -ENOMEM; 3772 return -ENOMEM;
3774 return 0; 3773 return 0;
3775} 3774}
@@ -3780,7 +3779,6 @@ qdio_release_qdio_memory(void)
3780 kfree(indicators); 3779 kfree(indicators);
3781} 3780}
3782 3781
3783
3784static void 3782static void
3785qdio_unregister_dbf_views(void) 3783qdio_unregister_dbf_views(void)
3786{ 3784{
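The qdio.c changes above follow the reworked adapter-interrupt interface: registration now returns an indicator pointer (checked with IS_ERR()) instead of an error code, the handler takes the indicator plus a driver-data cookie, and unregistration is done by indicator. A minimal sketch under those assumptions (example_* names are hypothetical):

	static void *example_ind;

	static void example_handler(void *ind, void *drv_data)
	{
		/* invoked for adapter interrupts; *ind is the per-user
		 * indicator allocated by the registration call */
	}

	static void example_register(void)
	{
		example_ind = s390_register_adapter_interrupt(&example_handler, NULL);
		if (IS_ERR(example_ind))
			example_ind = NULL;	/* registration failed */
	}

	static void example_unregister(void)
	{
		if (example_ind)
			s390_unregister_adapter_interrupt(example_ind);
	}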
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 6d7aad18f6f0..37870e4e938e 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -57,7 +57,7 @@
57 of the queue to 0 */ 57 of the queue to 0 */
58 58
59#define QDIO_ESTABLISH_TIMEOUT (1*HZ) 59#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
60#define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10) 60#define QDIO_ACTIVATE_TIMEOUT (5*HZ)
61#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) 61#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
62#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) 62#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
63#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ) 63#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)