Diffstat (limited to 'drivers/s390/cio')
 drivers/s390/cio/Makefile        |    2
 drivers/s390/cio/ccwreq.c        |  328
 drivers/s390/cio/cio.h           |    8
 drivers/s390/cio/css.c           |   57
 drivers/s390/cio/css.h           |    3
 drivers/s390/cio/device.c        | 1006
 drivers/s390/cio/device.h        |   25
 drivers/s390/cio/device_fsm.c    |  411
 drivers/s390/cio/device_id.c     |  375
 drivers/s390/cio/device_ops.c    |  142
 drivers/s390/cio/device_pgid.c   |  963
 drivers/s390/cio/device_status.c |    3
 drivers/s390/cio/io_sch.h        |   73
 13 files changed, 1668 insertions(+), 1728 deletions(-)
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index fa4c9662f65e..d033414f7599 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -3,7 +3,7 @@
 #
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
-	fcx.o itcw.o crw.o
+	fcx.o itcw.o crw.o ccwreq.o
 ccw_device-objs += device.o device_fsm.o device_ops.o
 ccw_device-objs += device_id.o device_pgid.o device_status.o
 obj-y += ccw_device.o cmf.o
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..9509e3860934
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
+/*
+ * Handling of internal CCW device requests.
+ *
+ * Copyright IBM Corp. 2009
+ * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <asm/ccwdev.h>
+#include <asm/cio.h>
+
+#include "io_sch.h"
+#include "cio.h"
+#include "device.h"
+#include "cio_debug.h"
+
+/**
+ * lpm_adjust - adjust path mask
+ * @lpm: path mask to adjust
+ * @mask: mask of available paths
+ *
+ * Shift @lpm right until @lpm and @mask have at least one bit in common or
+ * until @lpm is zero. Return the resulting lpm.
+ */
+int lpm_adjust(int lpm, int mask)
+{
+	while (lpm && ((lpm & mask) == 0))
+		lpm >>= 1;
+	return lpm;
+}
+
+/*
+ * Adjust path mask to use next path and reset retry count. Return resulting
+ * path mask.
+ */
+static u16 ccwreq_next_path(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	req->retries = req->maxretries;
+	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
+
+	return req->mask;
+}
+
+/*
+ * Clean up device state and report to callback.
+ */
+static void ccwreq_stop(struct ccw_device *cdev, int rc)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+
+	if (req->done)
+		return;
+	req->done = 1;
+	ccw_device_set_timeout(cdev, 0);
+	memset(&cdev->private->irb, 0, sizeof(struct irb));
+	sch->lpm = sch->schib.pmcw.pam;
+	if (rc && rc != -ENODEV && req->drc)
+		rc = req->drc;
+	req->callback(cdev, req->data, rc);
+}
+
+/*
+ * (Re-)Start the operation until retries and paths are exhausted.
+ */
+static void ccwreq_do(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw1 *cp = req->cp;
+	int rc = -EACCES;
+
+	while (req->mask) {
+		if (req->retries-- == 0) {
+			/* Retries exhausted, try next path. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Perform start function. */
+		sch->lpm = 0xff;
+		memset(&cdev->private->irb, 0, sizeof(struct irb));
+		rc = cio_start(sch, cp, (u8) req->mask);
+		if (rc == 0) {
+			/* I/O started successfully. */
+			ccw_device_set_timeout(cdev, req->timeout);
+			return;
+		}
+		if (rc == -ENODEV) {
+			/* Permanent device error. */
+			break;
+		}
+		if (rc == -EACCES) {
+			/* Permanent path error. */
+			ccwreq_next_path(cdev);
+			continue;
+		}
+		/* Temporary improper status. */
+		rc = cio_clear(sch);
+		if (rc)
+			break;
+		return;
+	}
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_start - perform I/O request
+ * @cdev: ccw device
+ *
+ * Perform the I/O request specified by cdev->req.
+ */
+void ccw_request_start(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+
+	/* Try all paths twice to counter link flapping. */
+	req->mask = 0x8080;
+	req->retries = req->maxretries;
+	req->mask = lpm_adjust(req->mask, req->lpm);
+	req->drc = 0;
+	req->done = 0;
+	req->cancel = 0;
+	if (!req->mask)
+		goto out_nopath;
+	ccwreq_do(cdev);
+	return;
+
+out_nopath:
+	ccwreq_stop(cdev, -EACCES);
+}
+
+/**
+ * ccw_request_cancel - cancel running I/O request
+ * @cdev: ccw device
+ *
+ * Cancel the I/O request specified by cdev->req. Return non-zero if request
+ * has already finished, zero otherwise.
+ */
+int ccw_request_cancel(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc;
+
+	if (req->done)
+		return 1;
+	req->cancel = 1;
+	rc = cio_clear(sch);
+	if (rc)
+		ccwreq_stop(cdev, rc);
+	return 0;
+}
+
+/*
+ * Return the status of the internal I/O started on the specified ccw device.
+ * Perform BASIC SENSE if required.
+ */
+static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+{
+	struct irb *irb = &cdev->private->irb;
+	struct cmd_scsw *scsw = &irb->scsw.cmd;
+
+	/* Perform BASIC SENSE if needed. */
+	if (ccw_device_accumulate_and_sense(cdev, lcirb))
+		return IO_RUNNING;
+	/* Check for halt/clear interrupt. */
+	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
+		return IO_KILLED;
+	/* Check for path error. */
+	if (scsw->cc == 3 || scsw->pno)
+		return IO_PATH_ERROR;
+	/* Handle BASIC SENSE data. */
+	if (irb->esw.esw0.erw.cons) {
+		CIO_TRACE_EVENT(2, "sensedata");
+		CIO_HEX_EVENT(2, &cdev->private->dev_id,
+			      sizeof(struct ccw_dev_id));
+		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
+		/* Check for command reject. */
+		if (irb->ecw[0] & SNS0_CMD_REJECT)
+			return IO_REJECTED;
+		/* Assume that unexpected SENSE data implies an error. */
+		return IO_STATUS_ERROR;
+	}
+	/* Check for channel errors. */
+	if (scsw->cstat != 0)
+		return IO_STATUS_ERROR;
+	/* Check for device errors. */
+	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
+		return IO_STATUS_ERROR;
+	/* Check for final state. */
+	if (!(scsw->dstat & DEV_STAT_DEV_END))
+		return IO_RUNNING;
+	/* Check for other improper status. */
+	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
+		return IO_STATUS_ERROR;
+	return IO_DONE;
+}
+
+/*
+ * Log ccw request status.
+ */
+static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct {
+		struct ccw_dev_id dev_id;
+		u16 retries;
+		u8 lpm;
+		u8 status;
+	} __attribute__ ((packed)) data;
+	data.dev_id = cdev->private->dev_id;
+	data.retries = req->retries;
+	data.lpm = (u8) req->mask;
+	data.status = (u8) status;
+	CIO_TRACE_EVENT(2, "reqstat");
+	CIO_HEX_EVENT(2, &data, sizeof(data));
+}
+
+/**
+ * ccw_request_handler - interrupt handler for I/O request procedure.
+ * @cdev: ccw device
+ *
+ * Handle interrupt during I/O request procedure.
+ */
+void ccw_request_handler(struct ccw_device *cdev)
+{
+	struct ccw_request *req = &cdev->private->req;
+	struct irb *irb = (struct irb *) __LC_IRB;
+	enum io_status status;
+	int rc = -EOPNOTSUPP;
+
+	/* Check status of I/O request. */
+	status = ccwreq_status(cdev, irb);
+	if (req->filter)
+		status = req->filter(cdev, req->data, irb, status);
+	if (status != IO_RUNNING)
+		ccw_device_set_timeout(cdev, 0);
+	if (status != IO_DONE && status != IO_RUNNING)
+		ccwreq_log_status(cdev, status);
+	switch (status) {
+	case IO_DONE:
+		break;
+	case IO_RUNNING:
+		return;
+	case IO_REJECTED:
+		goto err;
+	case IO_PATH_ERROR:
+		goto out_next_path;
+	case IO_STATUS_ERROR:
+		goto out_restart;
+	case IO_KILLED:
+		/* Check if request was cancelled on purpose. */
+		if (req->cancel) {
+			rc = -EIO;
+			goto err;
+		}
+		goto out_restart;
+	}
+	/* Check back with request initiator. */
+	if (!req->check)
+		goto out;
+	switch (req->check(cdev, req->data)) {
+	case 0:
+		break;
+	case -EAGAIN:
+		goto out_restart;
+	case -EACCES:
+		goto out_next_path;
+	default:
+		goto err;
+	}
+out:
+	ccwreq_stop(cdev, 0);
+	return;
+
+out_next_path:
+	/* Try next path and restart I/O. */
+	if (!ccwreq_next_path(cdev)) {
+		rc = -EACCES;
+		goto err;
+	}
+out_restart:
+	/* Restart. */
+	ccwreq_do(cdev);
+	return;
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+
+/**
+ * ccw_request_timeout - timeout handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle timeout during I/O request procedure.
+ */
+void ccw_request_timeout(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct ccw_request *req = &cdev->private->req;
+	int rc;
+
+	if (!ccwreq_next_path(cdev)) {
+		/* set the final return code for this request */
+		req->drc = -ETIME;
+	}
+	rc = cio_clear(sch);
+	if (rc)
+		goto err;
+	return;
+
+err:
+	ccwreq_stop(cdev, rc);
+}
+
+/**
+ * ccw_request_notoper - notoper handler for I/O request procedure
+ * @cdev: ccw device
+ *
+ * Handle notoper (device not operational) during I/O request procedure.
+ */
+void ccw_request_notoper(struct ccw_device *cdev)
+{
+	ccwreq_stop(cdev, -ENODEV);
+}
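
[Reviewer note] The path-selection logic above is compact enough to misread: ccw_request_start() seeds req->mask with 0x8080 so that each of the eight channel-path bits appears twice, and ccwreq_next_path()/lpm_adjust() shift that mask right until it lines up with an available path. The following standalone sketch is not part of the patch; it is plain userspace C with lpm_adjust() copied verbatim and a loop that only approximates ccwreq_do()'s path stepping, to show the order in which paths are tried when just some are available:

#include <stdio.h>

/* Copy of lpm_adjust() from ccwreq.c: shift the candidate path bit(s)
 * right until they overlap the available-path mask, or run out. */
static int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}

int main(void)
{
	/* Assume three of the eight paths are usable. */
	int lpm = 0x80 | 0x20 | 0x04;
	/* Seed from ccw_request_start(): 0x8080 makes every path appear
	 * twice, giving each path a second chance (link flapping). */
	int mask = lpm_adjust(0x8080, lpm);

	while (mask) {
		printf("start I/O on path mask 0x%04x (lpm 0x%02x)\n",
		       mask, mask & 0xff);
		/* What ccwreq_next_path() does once a path is exhausted. */
		mask = lpm_adjust(mask >> 1, lpm);
	}
	return 0;
}

Running it prints each usable path twice (0x80, 0x20, 0x04, then 0x80, 0x20, 0x04 again), which is exactly the retry pattern the request machinery walks.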
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 2e43558c704b..bf7f80f5a330 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,6 +68,11 @@ struct schib {
 	__u8 mda[4];		/* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+enum sch_todo {
+	SCH_TODO_NOTHING,
+	SCH_TODO_UNREG,
+};
+
 /* subchannel data structure used by I/O subroutines */
 struct subchannel {
 	struct subchannel_id schid;
@@ -95,7 +100,8 @@ struct subchannel {
 	struct device dev;	/* entry in device tree */
 	struct css_driver *driver;
 	void *private; /* private per subchannel type data */
-	struct work_struct work;
+	enum sch_todo todo;
+	struct work_struct todo_work;
 	struct schib_config config;
 } __attribute__ ((aligned(8)));
 
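
[Reviewer note] The ordering of enum sch_todo is load-bearing: css_sched_sch_todo() (added to css.c below) drops a request whose value does not exceed the one already pending, so entries later in the enum take priority. A hedged userspace C sketch of that coalescing rule follows; the TODO_EVAL entry is hypothetical and only added to make the ranking visible (the patch itself defines just SCH_TODO_NOTHING and SCH_TODO_UNREG):

#include <stdio.h>

/* Mirrors enum sch_todo: later entries outrank earlier ones. */
enum todo { TODO_NOTHING, TODO_EVAL, TODO_UNREG };

/* Hypothetical stand-in for css_sched_sch_todo(): keep at most one
 * pending operation per subchannel, never downgrading it. */
static void sched_todo(enum todo *pending, enum todo new)
{
	if (*pending >= new)
		return; /* Equal or stronger action already queued. */
	*pending = new;
}

int main(void)
{
	enum todo pending = TODO_NOTHING;

	sched_todo(&pending, TODO_UNREG);
	sched_todo(&pending, TODO_EVAL);	/* Ignored: UNREG outranks it. */
	printf("pending todo = %d\n", pending);	/* Prints 2 (TODO_UNREG). */
	return 0;
}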
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 91c25706fa83..92ff88ac1107 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -133,6 +133,8 @@ out:
 	return rc;
 }
 
+static void css_sch_todo(struct work_struct *work);
+
 static struct subchannel *
 css_alloc_subchannel(struct subchannel_id schid)
 {
@@ -147,6 +149,7 @@ css_alloc_subchannel(struct subchannel_id schid)
 		kfree(sch);
 		return ERR_PTR(ret);
 	}
+	INIT_WORK(&sch->todo_work, css_sch_todo);
 	return sch;
 }
 
@@ -190,6 +193,51 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
+static void css_sch_todo(struct work_struct *work)
+{
+	struct subchannel *sch;
+	enum sch_todo todo;
+
+	sch = container_of(work, struct subchannel, todo_work);
+	/* Find out todo. */
+	spin_lock_irq(sch->lock);
+	todo = sch->todo;
+	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+		      sch->schid.sch_no, todo);
+	sch->todo = SCH_TODO_NOTHING;
+	spin_unlock_irq(sch->lock);
+	/* Perform todo. */
+	if (todo == SCH_TODO_UNREG)
+		css_sch_device_unregister(sch);
+	/* Release workqueue ref. */
+	put_device(&sch->dev);
+}
+
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, todo);
+	if (sch->todo >= todo)
+		return;
+	/* Get workqueue ref. */
+	if (!get_device(&sch->dev))
+		return;
+	sch->todo = todo;
+	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+		/* Already queued, release workqueue ref. */
+		put_device(&sch->dev);
+	}
+}
+
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
 	int i;
@@ -376,8 +424,8 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
 		/* Unusable - ignore. */
 		return 0;
 	}
-	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
-		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
+		      schid.sch_no);
 
 	return css_probe_device(schid);
 }
@@ -394,6 +442,10 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
394 "Got subchannel machine check but " 442 "Got subchannel machine check but "
395 "no sch_event handler provided.\n"); 443 "no sch_event handler provided.\n");
396 } 444 }
445 if (ret != 0 && ret != -EAGAIN) {
446 CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
447 sch->schid.ssid, sch->schid.sch_no, ret);
448 }
397 return ret; 449 return ret;
398} 450}
399 451
@@ -684,6 +736,7 @@ static int __init setup_css(int nr)
 	css->pseudo_subchannel->dev.parent = &css->device;
 	css->pseudo_subchannel->dev.release = css_subchannel_release;
 	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
+	mutex_init(&css->pseudo_subchannel->reg_mutex);
 	ret = cio_create_sch_lock(css->pseudo_subchannel);
 	if (ret) {
 		kfree(css->pseudo_subchannel);
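
[Reviewer note] A second detail worth calling out in css_sched_sch_todo()/css_sch_todo() above is the reference-counting contract: the scheduler takes a device reference on behalf of the queued work item, immediately returns it if queue_work() reports the item was already pending, and the work function drops it once the todo has run. Below is a minimal sketch of the same ownership rule, under the assumption that a plain counter can stand in for the struct device refcount and a boolean for the workqueue state (userspace C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refcount;		/* Stand-in for the struct device refcount. */
	bool work_queued;	/* Stand-in for the workqueue's pending bit. */
};

static void obj_get(struct obj *o) { o->refcount++; }	/* get_device() */
static void obj_put(struct obj *o) { o->refcount--; }	/* put_device() */

/* Stand-in for queue_work(): returns false if already queued. */
static bool queue_work_once(struct obj *o)
{
	if (o->work_queued)
		return false;
	o->work_queued = true;
	return true;
}

/* Mirrors css_sched_sch_todo(): the queued work item owns one reference. */
static void sched_work(struct obj *o)
{
	obj_get(o);
	if (!queue_work_once(o))
		obj_put(o);	/* Already queued: give the reference back. */
}

/* Mirrors css_sch_todo(): release the workqueue reference when done. */
static void run_work(struct obj *o)
{
	o->work_queued = false;
	/* ... perform the todo ... */
	obj_put(o);
}

int main(void)
{
	struct obj o = { .refcount = 1, .work_queued = false };

	sched_work(&o);	/* Takes a reference and queues. */
	sched_work(&o);	/* Already queued: reference dropped again. */
	run_work(&o);	/* Work done: queued reference released. */
	printf("refcount back to %d\n", o.refcount);	/* Prints 1. */
	return 0;
}

The invariant this demonstrates is that the object can never be freed while its work item is pending, no matter how many times scheduling races with execution.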
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 68d6b0bf151c..fe84b92cde60 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -11,6 +11,8 @@
 #include <asm/chpid.h>
 #include <asm/schid.h>
 
+#include "cio.h"
+
 /*
  * path grouping stuff
  */
@@ -151,4 +153,5 @@ int css_sch_is_valid(struct schib *);
 
 extern struct workqueue_struct *slow_path_wq;
 void css_wait_for_slow_path(void);
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
 #endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 2490b741e16a..9fecfb4223a8 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -7,6 +7,10 @@
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
  */
+
+#define KMSG_COMPONENT "cio"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
@@ -299,53 +303,18 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
 
 static void ccw_device_unregister(struct ccw_device *cdev)
 {
-	if (test_and_clear_bit(1, &cdev->private->registered)) {
+	if (device_is_registered(&cdev->dev)) {
+		/* Undo device_add(). */
 		device_del(&cdev->dev);
+	}
+	if (cdev->private->flags.initialized) {
+		cdev->private->flags.initialized = 0;
 		/* Release reference from device_initialize(). */
 		put_device(&cdev->dev);
 	}
 }
 
-static void ccw_device_remove_orphan_cb(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	ccw_device_unregister(cdev);
-	/* Release cdev reference for workqueue processing. */
-	put_device(&cdev->dev);
-}
-
-static void
-ccw_device_remove_disconnected(struct ccw_device *cdev)
-{
-	unsigned long flags;
-
-	/*
-	 * Forced offline in disconnected state means
-	 * 'throw away device'.
-	 */
-	if (ccw_device_is_orphan(cdev)) {
-		/*
-		 * Deregister ccw device.
-		 * Unfortunately, we cannot do this directly from the
-		 * attribute method.
-		 */
-		/* Get cdev reference for workqueue processing. */
-		if (!get_device(&cdev->dev))
-			return;
-		spin_lock_irqsave(cdev->ccwlock, flags);
-		cdev->private->state = DEV_STATE_NOT_OPER;
-		spin_unlock_irqrestore(cdev->ccwlock, flags);
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_remove_orphan_cb);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-	} else
-		/* Deregister subchannel, which will kill the ccw device. */
-		ccw_device_schedule_sch_unregister(cdev);
-}
+static void io_subchannel_quiesce(struct subchannel *);
 
 /**
  * ccw_device_set_offline() - disable a ccw device for I/O
@@ -360,7 +329,8 @@ ccw_device_remove_disconnected(struct ccw_device *cdev)
  */
 int ccw_device_set_offline(struct ccw_device *cdev)
 {
-	int ret;
+	struct subchannel *sch;
+	int ret, state;
 
 	if (!cdev)
 		return -ENODEV;
@@ -374,6 +344,7 @@ int ccw_device_set_offline(struct ccw_device *cdev)
 	}
 	cdev->online = 0;
 	spin_lock_irq(cdev->ccwlock);
+	sch = to_subchannel(cdev->dev.parent);
 	/* Wait until a final state or DISCONNECTED is reached */
 	while (!dev_fsm_final_state(cdev) &&
 	       cdev->private->state != DEV_STATE_DISCONNECTED) {
@@ -382,20 +353,37 @@ int ccw_device_set_offline(struct ccw_device *cdev)
 			   cdev->private->state == DEV_STATE_DISCONNECTED));
 		spin_lock_irq(cdev->ccwlock);
 	}
-	ret = ccw_device_offline(cdev);
-	if (ret)
-		goto error;
+	do {
+		ret = ccw_device_offline(cdev);
+		if (!ret)
+			break;
+		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
+			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
+			      cdev->private->dev_id.devno);
+		if (ret != -EBUSY)
+			goto error;
+		state = cdev->private->state;
+		spin_unlock_irq(cdev->ccwlock);
+		io_subchannel_quiesce(sch);
+		spin_lock_irq(cdev->ccwlock);
+		cdev->private->state = state;
+	} while (ret == -EBUSY);
 	spin_unlock_irq(cdev->ccwlock);
 	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
 		   cdev->private->state == DEV_STATE_DISCONNECTED));
+	/* Inform the user if set offline failed. */
+	if (cdev->private->state == DEV_STATE_BOXED) {
+		pr_warning("%s: The device entered boxed state while "
+			   "being set offline\n", dev_name(&cdev->dev));
+	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+		pr_warning("%s: The device stopped operating while "
+			   "being set offline\n", dev_name(&cdev->dev));
+	}
 	/* Give up reference from ccw_device_set_online(). */
 	put_device(&cdev->dev);
 	return 0;
 
 error:
-	CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device 0.%x.%04x\n",
-		      ret, cdev->private->dev_id.ssid,
-		      cdev->private->dev_id.devno);
 	cdev->private->state = DEV_STATE_OFFLINE;
 	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 	spin_unlock_irq(cdev->ccwlock);
@@ -448,6 +436,16 @@ int ccw_device_set_online(struct ccw_device *cdev)
 	if ((cdev->private->state != DEV_STATE_ONLINE) &&
 	    (cdev->private->state != DEV_STATE_W4SENSE)) {
 		spin_unlock_irq(cdev->ccwlock);
+		/* Inform the user that set online failed. */
+		if (cdev->private->state == DEV_STATE_BOXED) {
+			pr_warning("%s: Setting the device online failed "
+				   "because it is boxed\n",
+				   dev_name(&cdev->dev));
+		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
+			pr_warning("%s: Setting the device online failed "
+				   "because it is not operational\n",
+				   dev_name(&cdev->dev));
+		}
 		/* Give up online reference since onlining failed. */
 		put_device(&cdev->dev);
 		return -ENODEV;
@@ -494,27 +492,22 @@ error:
 
 static int online_store_handle_offline(struct ccw_device *cdev)
 {
-	if (cdev->private->state == DEV_STATE_DISCONNECTED)
-		ccw_device_remove_disconnected(cdev);
-	else if (cdev->online && cdev->drv && cdev->drv->set_offline)
+	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
+		spin_unlock_irq(cdev->ccwlock);
+	} else if (cdev->online && cdev->drv && cdev->drv->set_offline)
 		return ccw_device_set_offline(cdev);
 	return 0;
 }
 
 static int online_store_recog_and_online(struct ccw_device *cdev)
 {
-	int ret;
-
 	/* Do device recognition, if needed. */
 	if (cdev->private->state == DEV_STATE_BOXED) {
-		ret = ccw_device_recognition(cdev);
-		if (ret) {
-			CIO_MSG_EVENT(0, "Couldn't start recognition "
-				      "for device 0.%x.%04x (ret=%d)\n",
-				      cdev->private->dev_id.ssid,
-				      cdev->private->dev_id.devno, ret);
-			return ret;
-		}
+		spin_lock_irq(cdev->ccwlock);
+		ccw_device_recognition(cdev);
+		spin_unlock_irq(cdev->ccwlock);
 		wait_event(cdev->private->wait_q,
 			   cdev->private->flags.recog_done);
 		if (cdev->private->state != DEV_STATE_OFFLINE)
@@ -553,11 +546,10 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
 	int force, ret;
 	unsigned long i;
 
-	if ((cdev->private->state != DEV_STATE_OFFLINE &&
-	     cdev->private->state != DEV_STATE_ONLINE &&
-	     cdev->private->state != DEV_STATE_BOXED &&
-	     cdev->private->state != DEV_STATE_DISCONNECTED) ||
-	    atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
+	if (!dev_fsm_final_state(cdev) &&
+	    cdev->private->state != DEV_STATE_DISCONNECTED)
+		return -EAGAIN;
+	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
 		return -EAGAIN;
 
 	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
@@ -665,81 +657,31 @@ static int ccw_device_register(struct ccw_device *cdev)
 			   cdev->private->dev_id.devno);
 	if (ret)
 		return ret;
-	ret = device_add(dev);
-	if (ret)
-		return ret;
-
-	set_bit(1, &cdev->private->registered);
-	return ret;
+	return device_add(dev);
 }
 
-struct match_data {
-	struct ccw_dev_id dev_id;
-	struct ccw_device * sibling;
-};
-
-static int
-match_devno(struct device * dev, void * data)
-{
-	struct match_data * d = data;
-	struct ccw_device * cdev;
-
-	cdev = to_ccwdev(dev);
-	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
-	    !ccw_device_is_orphan(cdev) &&
-	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
-	    (cdev != d->sibling))
-		return 1;
-	return 0;
-}
-
-static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
-						     struct ccw_device *sibling)
-{
-	struct device *dev;
-	struct match_data data;
-
-	data.dev_id = *dev_id;
-	data.sibling = sibling;
-	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
-
-	return dev ? to_ccwdev(dev) : NULL;
-}
-
-static int match_orphan(struct device *dev, void *data)
+static int match_dev_id(struct device *dev, void *data)
 {
-	struct ccw_dev_id *dev_id;
-	struct ccw_device *cdev;
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_dev_id *dev_id = data;
 
-	dev_id = data;
-	cdev = to_ccwdev(dev);
 	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
 }
 
-static struct ccw_device *
-get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
-			      struct ccw_dev_id *dev_id)
+static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
 {
 	struct device *dev;
 
-	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
-				match_orphan);
+	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
 
 	return dev ? to_ccwdev(dev) : NULL;
 }
 
-void ccw_device_do_unbind_bind(struct work_struct *work)
+static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-	struct subchannel *sch;
 	int ret;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	sch = to_subchannel(cdev->dev.parent);
-
-	if (test_bit(1, &cdev->private->registered)) {
+	if (device_is_registered(&cdev->dev)) {
 		device_release_driver(&cdev->dev);
 		ret = device_attach(&cdev->dev);
 		WARN_ON(ret == -ENODEV);
@@ -773,6 +715,8 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
 	return ERR_PTR(-ENOMEM);
 }
 
+static void ccw_device_todo(struct work_struct *work);
+
 static int io_subchannel_initialize_dev(struct subchannel *sch,
 					struct ccw_device *cdev)
 {
@@ -780,7 +724,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 	atomic_set(&cdev->private->onoff, 0);
 	cdev->dev.parent = &sch->dev;
 	cdev->dev.release = ccw_device_release;
-	INIT_WORK(&cdev->private->kick_work, NULL);
+	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
 	cdev->dev.groups = ccwdev_attr_groups;
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
@@ -789,6 +733,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
 		put_device(&cdev->dev);
 		return -ENODEV;
 	}
+	cdev->private->flags.initialized = 1;
 	return 0;
 }
 
@@ -806,76 +751,7 @@ static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
 	return cdev;
 }
 
-static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
-
-static void sch_attach_device(struct subchannel *sch,
-			      struct ccw_device *cdev)
-{
-	css_update_ssd_info(sch);
-	spin_lock_irq(sch->lock);
-	sch_set_cdev(sch, cdev);
-	cdev->private->schid = sch->schid;
-	cdev->ccwlock = sch->lock;
-	ccw_device_trigger_reprobe(cdev);
-	spin_unlock_irq(sch->lock);
-}
-
-static void sch_attach_disconnected_device(struct subchannel *sch,
-					   struct ccw_device *cdev)
-{
-	struct subchannel *other_sch;
-	int ret;
-
-	/* Get reference for new parent. */
-	if (!get_device(&sch->dev))
-		return;
-	other_sch = to_subchannel(cdev->dev.parent);
-	/* Note: device_move() changes cdev->dev.parent */
-	ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
-	if (ret) {
-		CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
-			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno, ret);
-		/* Put reference for new parent. */
-		put_device(&sch->dev);
-		return;
-	}
-	sch_set_cdev(other_sch, NULL);
-	/* No need to keep a subchannel without ccw device around. */
-	css_sch_device_unregister(other_sch);
-	sch_attach_device(sch, cdev);
-	/* Put reference for old parent. */
-	put_device(&other_sch->dev);
-}
-
-static void sch_attach_orphaned_device(struct subchannel *sch,
-				       struct ccw_device *cdev)
-{
-	int ret;
-	struct subchannel *pseudo_sch;
-
-	/* Get reference for new parent. */
-	if (!get_device(&sch->dev))
-		return;
-	pseudo_sch = to_subchannel(cdev->dev.parent);
-	/*
-	 * Try to move the ccw device to its new subchannel.
-	 * Note: device_move() changes cdev->dev.parent
-	 */
-	ret = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
-	if (ret) {
-		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
-			      "failed (ret=%d)!\n",
-			      cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno, ret);
-		/* Put reference for new parent. */
-		put_device(&sch->dev);
-		return;
-	}
-	sch_attach_device(sch, cdev);
-	/* Put reference on pseudo subchannel. */
-	put_device(&pseudo_sch->dev);
-}
+static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
 
 static void sch_create_and_recog_new_device(struct subchannel *sch)
 {
@@ -888,100 +764,19 @@ static void sch_create_and_recog_new_device(struct subchannel *sch)
 		css_sch_device_unregister(sch);
 		return;
 	}
-	spin_lock_irq(sch->lock);
-	sch_set_cdev(sch, cdev);
-	spin_unlock_irq(sch->lock);
 	/* Start recognition for the new ccw device. */
-	if (io_subchannel_recog(cdev, sch)) {
-		spin_lock_irq(sch->lock);
-		sch_set_cdev(sch, NULL);
-		spin_unlock_irq(sch->lock);
-		css_sch_device_unregister(sch);
-		/* Put reference from io_subchannel_create_ccwdev(). */
-		put_device(&sch->dev);
-		/* Give up initial reference. */
-		put_device(&cdev->dev);
-	}
-}
-
-
-void ccw_device_move_to_orphanage(struct work_struct *work)
-{
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
-	struct ccw_device *replacing_cdev;
-	struct subchannel *sch;
-	int ret;
-	struct channel_subsystem *css;
-	struct ccw_dev_id dev_id;
-
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
-	sch = to_subchannel(cdev->dev.parent);
-	css = to_css(sch->dev.parent);
-	dev_id.devno = sch->schib.pmcw.dev;
-	dev_id.ssid = sch->schid.ssid;
-
-	/* Increase refcount for pseudo subchannel. */
-	get_device(&css->pseudo_subchannel->dev);
-	/*
-	 * Move the orphaned ccw device to the orphanage so the replacing
-	 * ccw device can take its place on the subchannel.
-	 * Note: device_move() changes cdev->dev.parent
-	 */
-	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev,
-			  DPM_ORDER_NONE);
-	if (ret) {
-		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
-			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
-			      cdev->private->dev_id.devno, ret);
-		/* Decrease refcount for pseudo subchannel again. */
-		put_device(&css->pseudo_subchannel->dev);
-		return;
-	}
-	cdev->ccwlock = css->pseudo_subchannel->lock;
-	/*
-	 * Search for the replacing ccw device
-	 * - among the disconnected devices
-	 * - in the orphanage
-	 */
-	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
-	if (replacing_cdev) {
-		sch_attach_disconnected_device(sch, replacing_cdev);
-		/* Release reference from get_disc_ccwdev_by_dev_id() */
-		put_device(&replacing_cdev->dev);
-		/* Release reference of subchannel from old cdev. */
-		put_device(&sch->dev);
-		return;
-	}
-	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
-	if (replacing_cdev) {
-		sch_attach_orphaned_device(sch, replacing_cdev);
-		/* Release reference from get_orphaned_ccwdev_by_dev_id() */
-		put_device(&replacing_cdev->dev);
-		/* Release reference of subchannel from old cdev. */
-		put_device(&sch->dev);
-		return;
-	}
-	sch_create_and_recog_new_device(sch);
-	/* Release reference of subchannel from old cdev. */
-	put_device(&sch->dev);
+	io_subchannel_recog(cdev, sch);
 }
 
 /*
  * Register recognized device.
  */
-static void
-io_subchannel_register(struct work_struct *work)
+static void io_subchannel_register(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
 	struct subchannel *sch;
 	int ret;
 	unsigned long flags;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
 	sch = to_subchannel(cdev->dev.parent);
 	/*
 	 * Check if subchannel is still registered. It may have become
@@ -1033,41 +828,23 @@ out:
 	cdev->private->flags.recog_done = 1;
 	wake_up(&cdev->private->wait_q);
 out_err:
-	/* Release reference for workqueue processing. */
-	put_device(&cdev->dev);
 	if (atomic_dec_and_test(&ccw_device_init_count))
 		wake_up(&ccw_device_init_wq);
 }
 
-static void ccw_device_call_sch_unregister(struct work_struct *work)
+static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
 {
-	struct ccw_device_private *priv;
-	struct ccw_device *cdev;
 	struct subchannel *sch;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	cdev = priv->cdev;
 	/* Get subchannel reference for local processing. */
 	if (!get_device(cdev->dev.parent))
 		return;
 	sch = to_subchannel(cdev->dev.parent);
 	css_sch_device_unregister(sch);
-	/* Release cdev reference for workqueue processing.*/
-	put_device(&cdev->dev);
 	/* Release subchannel reference for local processing. */
 	put_device(&sch->dev);
 }
 
-void ccw_device_schedule_sch_unregister(struct ccw_device *cdev)
-{
-	/* Get cdev reference for workqueue processing. */
-	if (!get_device(&cdev->dev))
-		return;
-	PREPARE_WORK(&cdev->private->kick_work,
-		     ccw_device_call_sch_unregister);
-	queue_work(slow_path_wq, &cdev->private->kick_work);
-}
-
 /*
  * subchannel recognition done. Called from the state machine.
  */
@@ -1083,7 +860,8 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 	/* Device did not respond in time. */
 	case DEV_STATE_NOT_OPER:
 		cdev->private->flags.recog_done = 1;
-		ccw_device_schedule_sch_unregister(cdev);
+		/* Remove device found not operational. */
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		if (atomic_dec_and_test(&ccw_device_init_count))
 			wake_up(&ccw_device_init_wq);
 		break;
@@ -1092,22 +870,15 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 		 * We can't register the device in interrupt context so
 		 * we schedule a work item.
 		 */
-		if (!get_device(&cdev->dev))
-			break;
-		PREPARE_WORK(&cdev->private->kick_work,
-			     io_subchannel_register);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
+		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
 		break;
 	}
 }
 
-static int
-io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
+static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 {
-	int rc;
 	struct ccw_device_private *priv;
 
-	sch_set_cdev(sch, cdev);
 	cdev->ccwlock = sch->lock;
 
 	/* Init private data. */
@@ -1125,62 +896,81 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 
 	/* Start async. device sensing. */
 	spin_lock_irq(sch->lock);
-	rc = ccw_device_recognition(cdev);
+	sch_set_cdev(sch, cdev);
+	ccw_device_recognition(cdev);
 	spin_unlock_irq(sch->lock);
-	if (rc) {
-		if (atomic_dec_and_test(&ccw_device_init_count))
-			wake_up(&ccw_device_init_wq);
-	}
-	return rc;
 }
 
-static void ccw_device_move_to_sch(struct work_struct *work)
+static int ccw_device_move_to_sch(struct ccw_device *cdev,
+				  struct subchannel *sch)
 {
-	struct ccw_device_private *priv;
-	int rc;
-	struct subchannel *sch;
-	struct ccw_device *cdev;
-	struct subchannel *former_parent;
+	struct subchannel *old_sch;
+	int rc, old_enabled = 0;
 
-	priv = container_of(work, struct ccw_device_private, kick_work);
-	sch = priv->sch;
-	cdev = priv->cdev;
-	former_parent = to_subchannel(cdev->dev.parent);
-	/* Get reference for new parent. */
+	old_sch = to_subchannel(cdev->dev.parent);
+	/* Obtain child reference for new parent. */
 	if (!get_device(&sch->dev))
-		return;
+		return -ENODEV;
+
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		old_enabled = old_sch->schib.pmcw.ena;
+		rc = 0;
+		if (old_enabled)
+			rc = cio_disable_subchannel(old_sch);
+		spin_unlock_irq(old_sch->lock);
+		if (rc == -EBUSY) {
+			/* Release child reference for new parent. */
+			put_device(&sch->dev);
+			return rc;
+		}
+	}
+
 	mutex_lock(&sch->reg_mutex);
-	/*
-	 * Try to move the ccw device to its new subchannel.
-	 * Note: device_move() changes cdev->dev.parent
-	 */
 	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
 	mutex_unlock(&sch->reg_mutex);
 	if (rc) {
-		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
-			      "0.%x.%04x failed (ret=%d)!\n",
+		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
 			      cdev->private->dev_id.ssid,
 			      cdev->private->dev_id.devno, sch->schid.ssid,
-			      sch->schid.sch_no, rc);
-		css_sch_device_unregister(sch);
-		/* Put reference for new parent again. */
+			      sch->schib.pmcw.dev, rc);
+		if (old_enabled) {
+			/* Try to reenable the old subchannel. */
+			spin_lock_irq(old_sch->lock);
+			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
+			spin_unlock_irq(old_sch->lock);
+		}
+		/* Release child reference for new parent. */
 		put_device(&sch->dev);
-		goto out;
+		return rc;
 	}
-	if (!sch_is_pseudo_sch(former_parent)) {
-		spin_lock_irq(former_parent->lock);
-		sch_set_cdev(former_parent, NULL);
-		spin_unlock_irq(former_parent->lock);
-		css_sch_device_unregister(former_parent);
-		/* Reset intparm to zeroes. */
-		former_parent->config.intparm = 0;
-		cio_commit_config(former_parent);
+	/* Clean up old subchannel. */
+	if (!sch_is_pseudo_sch(old_sch)) {
+		spin_lock_irq(old_sch->lock);
+		sch_set_cdev(old_sch, NULL);
+		spin_unlock_irq(old_sch->lock);
+		css_schedule_eval(old_sch->schid);
 	}
-	sch_attach_device(sch, cdev);
-out:
-	/* Put reference for old parent. */
-	put_device(&former_parent->dev);
-	put_device(&cdev->dev);
+	/* Release child reference for old parent. */
+	put_device(&old_sch->dev);
+	/* Initialize new subchannel. */
+	spin_lock_irq(sch->lock);
+	cdev->private->schid = sch->schid;
+	cdev->ccwlock = sch->lock;
+	if (!sch_is_pseudo_sch(sch))
+		sch_set_cdev(sch, cdev);
+	spin_unlock_irq(sch->lock);
+	if (!sch_is_pseudo_sch(sch))
+		css_update_ssd_info(sch);
+	return 0;
+}
+
+static int ccw_device_move_to_orph(struct ccw_device *cdev)
+{
+	struct subchannel *sch = to_subchannel(cdev->dev.parent);
+	struct channel_subsystem *css = to_css(sch->dev.parent);
+
+	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
 }
 
 static void io_subchannel_irq(struct subchannel *sch)
@@ -1199,9 +989,6 @@ void io_subchannel_init_config(struct subchannel *sch)
 {
 	memset(&sch->config, 0, sizeof(sch->config));
 	sch->config.csense = 1;
-	/* Use subchannel mp mode when there is more than 1 installed CHPID. */
-	if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0)
-		sch->config.mp = 1;
 }
 
 static void io_subchannel_init_fields(struct subchannel *sch)
@@ -1222,23 +1009,6 @@ static void io_subchannel_init_fields(struct subchannel *sch)
 	io_subchannel_init_config(sch);
 }
 
-static void io_subchannel_do_unreg(struct work_struct *work)
-{
-	struct subchannel *sch;
-
-	sch = container_of(work, struct subchannel, work);
-	css_sch_device_unregister(sch);
-	put_device(&sch->dev);
-}
-
-/* Schedule unregister if we have no cdev. */
-static void io_subchannel_schedule_removal(struct subchannel *sch)
-{
-	get_device(&sch->dev);
-	INIT_WORK(&sch->work, io_subchannel_do_unreg);
-	queue_work(slow_path_wq, &sch->work);
-}
-
 /*
  * Note: We always return 0 so that we bind to the device even on error.
  * This is needed so that our remove function is called on unregister.
@@ -1247,8 +1017,6 @@ static int io_subchannel_probe(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	int rc;
-	unsigned long flags;
-	struct ccw_dev_id dev_id;
 
 	if (cio_is_console(sch->schid)) {
 		rc = sysfs_create_group(&sch->dev.kobj,
@@ -1268,6 +1036,7 @@ static int io_subchannel_probe(struct subchannel *sch)
 		cdev = sch_get_cdev(sch);
 		cdev->dev.groups = ccwdev_attr_groups;
 		device_initialize(&cdev->dev);
+		cdev->private->flags.initialized = 1;
 		ccw_device_register(cdev);
 		/*
 		 * Check if the device is already online. If it is
@@ -1292,44 +1061,14 @@ static int io_subchannel_probe(struct subchannel *sch)
 	sch->private = kzalloc(sizeof(struct io_subchannel_private),
 			       GFP_KERNEL | GFP_DMA);
 	if (!sch->private)
-		goto out_err;
-	/*
-	 * First check if a fitting device may be found amongst the
-	 * disconnected devices or in the orphanage.
-	 */
-	dev_id.devno = sch->schib.pmcw.dev;
-	dev_id.ssid = sch->schid.ssid;
-	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
-	if (!cdev)
-		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
-						     &dev_id);
-	if (cdev) {
-		/*
-		 * Schedule moving the device until when we have a registered
-		 * subchannel to move to and succeed the probe. We can
-		 * unregister later again, when the probe is through.
-		 */
-		cdev->private->sch = sch;
-		PREPARE_WORK(&cdev->private->kick_work,
-			     ccw_device_move_to_sch);
-		queue_work(slow_path_wq, &cdev->private->kick_work);
-		return 0;
-	}
-	cdev = io_subchannel_create_ccwdev(sch);
-	if (IS_ERR(cdev))
-		goto out_err;
-	rc = io_subchannel_recog(cdev, sch);
-	if (rc) {
-		spin_lock_irqsave(sch->lock, flags);
-		io_subchannel_recog_done(cdev);
-		spin_unlock_irqrestore(sch->lock, flags);
-	}
+		goto out_schedule;
+	css_schedule_eval(sch->schid);
 	return 0;
-out_err:
-	kfree(sch->private);
-	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+
 out_schedule:
-	io_subchannel_schedule_removal(sch);
+	spin_lock_irq(sch->lock);
+	css_sched_sch_todo(sch, SCH_TODO_UNREG);
+	spin_unlock_irq(sch->lock);
 	return 0;
 }
 
@@ -1337,32 +1076,23 @@ static int
 io_subchannel_remove (struct subchannel *sch)
 {
 	struct ccw_device *cdev;
-	unsigned long flags;
 
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
-		return 0;
+		goto out_free;
+	io_subchannel_quiesce(sch);
 	/* Set ccw device to not operational and drop reference. */
-	spin_lock_irqsave(cdev->ccwlock, flags);
+	spin_lock_irq(cdev->ccwlock);
 	sch_set_cdev(sch, NULL);
 	cdev->private->state = DEV_STATE_NOT_OPER;
-	spin_unlock_irqrestore(cdev->ccwlock, flags);
+	spin_unlock_irq(cdev->ccwlock);
 	ccw_device_unregister(cdev);
+out_free:
 	kfree(sch->private);
 	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
 	return 0;
 }
 
-static int io_subchannel_notify(struct subchannel *sch, int event)
-{
-	struct ccw_device *cdev;
-
-	cdev = sch_get_cdev(sch);
-	if (!cdev)
-		return 0;
-	return ccw_device_notify(cdev, event);
-}
-
 static void io_subchannel_verify(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
@@ -1372,36 +1102,6 @@ static void io_subchannel_verify(struct subchannel *sch)
 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
 }
 
-static int check_for_io_on_path(struct subchannel *sch, int mask)
-{
-	if (cio_update_schib(sch))
-		return 0;
-	if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch,
-				  struct ccw_device *cdev)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (cdev->online)
-			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-		else
-			css_schedule_eval(sch->schid);
-		return;
-	}
-	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
-	/* Request retry of internal operation. */
-	cdev->private->flags.intretry = 1;
-	/* Call handler. */
-	if (cdev->handler)
-		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
-}
-
 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
 {
 	struct ccw_device *cdev;
@@ -1409,18 +1109,24 @@ static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
 	cdev = sch_get_cdev(sch);
 	if (!cdev)
 		return;
-	if (check_for_io_on_path(sch, mask)) {
-		if (cdev->private->state == DEV_STATE_ONLINE)
-			ccw_device_kill_io(cdev);
-		else {
-			terminate_internal_io(sch, cdev);
-			/* Re-start path verification. */
-			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
-		}
-	} else
-		/* trigger path verification. */
-		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	if (cio_update_schib(sch))
+		goto err;
+	/* Check for I/O on path. */
+	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
+		goto out;
+	if (cdev->private->state == DEV_STATE_ONLINE) {
+		ccw_device_kill_io(cdev);
+		goto out;
+	}
+	if (cio_clear(sch))
+		goto err;
+out:
+	/* Trigger path verification. */
+	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+	return;
 
+err:
+	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
 }
 
 static int io_subchannel_chp_event(struct subchannel *sch,
@@ -1457,46 +1163,41 @@ static int io_subchannel_chp_event(struct subchannel *sch,
 	return 0;
 }
 
-static void
-io_subchannel_shutdown(struct subchannel *sch)
+static void io_subchannel_quiesce(struct subchannel *sch)
 {
 	struct ccw_device *cdev;
 	int ret;
 
+	spin_lock_irq(sch->lock);
 	cdev = sch_get_cdev(sch);
-
 	if (cio_is_console(sch->schid))
-		return;
+		goto out_unlock;
 	if (!sch->schib.pmcw.ena)
-		/* Nothing to do. */
-		return;
+		goto out_unlock;
 	ret = cio_disable_subchannel(sch);
 	if (ret != -EBUSY)
-		/* Subchannel is disabled, we're done. */
-		return;
-	cdev->private->state = DEV_STATE_QUIESCE;
+		goto out_unlock;
 	if (cdev->handler)
-		cdev->handler(cdev, cdev->private->intparm,
-			      ERR_PTR(-EIO));
-	ret = ccw_device_cancel_halt_clear(cdev);
-	if (ret == -EBUSY) {
-		ccw_device_set_timeout(cdev, HZ/10);
-		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
+	while (ret == -EBUSY) {
+		cdev->private->state = DEV_STATE_QUIESCE;
+		ret = ccw_device_cancel_halt_clear(cdev);
+		if (ret == -EBUSY) {
+			ccw_device_set_timeout(cdev, HZ/10);
+			spin_unlock_irq(sch->lock);
+			wait_event(cdev->private->wait_q,
+				   cdev->private->state != DEV_STATE_QUIESCE);
+			spin_lock_irq(sch->lock);
+		}
+		ret = cio_disable_subchannel(sch);
 	}
-	cio_disable_subchannel(sch);
+out_unlock:
+	spin_unlock_irq(sch->lock);
 }
 
-static int io_subchannel_get_status(struct subchannel *sch)
+static void io_subchannel_shutdown(struct subchannel *sch)
 {
-	struct schib schib;
-
-	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
-		return CIO_GONE;
-	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
-		return CIO_REVALIDATE;
-	if (!sch->lpm)
-		return CIO_NO_PATH;
-	return CIO_OPER;
+	io_subchannel_quiesce(sch);
 }
 
 static int device_is_disconnected(struct ccw_device *cdev)
@@ -1575,20 +1276,16 @@ static void ccw_device_schedule_recovery(void)
 static int purge_fn(struct device *dev, void *data)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
-	struct ccw_device_private *priv = cdev->private;
-	int unreg;
+	struct ccw_dev_id *id = &cdev->private->dev_id;
 
 	spin_lock_irq(cdev->ccwlock);
-	unreg = is_blacklisted(priv->dev_id.ssid, priv->dev_id.devno) &&
-		(priv->state == DEV_STATE_OFFLINE);
+	if (is_blacklisted(id->ssid, id->devno) &&
+	    (cdev->private->state == DEV_STATE_OFFLINE)) {
+		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
+			      id->devno);
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+	}
 	spin_unlock_irq(cdev->ccwlock);
-	if (!unreg)
-		goto out;
-	CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", priv->dev_id.ssid,
-		      priv->dev_id.devno);
-	ccw_device_schedule_sch_unregister(cdev);
-
-out:
 	/* Abort loop in case of pending signal. */
 	if (signal_pending(current))
 		return -EINTR;
@@ -1630,91 +1327,169 @@ void ccw_device_set_notoper(struct ccw_device *cdev)
 	cdev->private->state = DEV_STATE_NOT_OPER;
 }
 
-static int io_subchannel_sch_event(struct subchannel *sch, int slow)
+enum io_sch_action {
+	IO_SCH_UNREG,
+	IO_SCH_ORPH_UNREG,
+	IO_SCH_ATTACH,
+	IO_SCH_UNREG_ATTACH,
+	IO_SCH_ORPH_ATTACH,
+	IO_SCH_REPROBE,
+	IO_SCH_VERIFY,
+	IO_SCH_DISC,
+	IO_SCH_NOP,
+};
+
+static enum io_sch_action sch_get_action(struct subchannel *sch)
+{
+	struct ccw_device *cdev;
+
+	cdev = sch_get_cdev(sch);
+	if (cio_update_schib(sch)) {
+		/* Not operational. */
+		if (!cdev)
+			return IO_SCH_UNREG;
+		if (!ccw_device_notify(cdev, CIO_GONE))
+			return IO_SCH_UNREG;
+		return IO_SCH_ORPH_UNREG;
+	}
+	/* Operational. */
+	if (!cdev)
+		return IO_SCH_ATTACH;
+	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
+		if (!ccw_device_notify(cdev, CIO_GONE))
+			return IO_SCH_UNREG_ATTACH;
+		return IO_SCH_ORPH_ATTACH;
+	}
+	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
+		if (!ccw_device_notify(cdev, CIO_NO_PATH))
+			return IO_SCH_UNREG;
+		return IO_SCH_DISC;
+	}
+	if (device_is_disconnected(cdev))
+		return IO_SCH_REPROBE;
+	if (cdev->online)
+		return IO_SCH_VERIFY;
+	return IO_SCH_NOP;
+}
+
+/**
+ * io_subchannel_sch_event - process subchannel event
+ * @sch: subchannel
+ * @process: non-zero if function is called in process context
+ *
+ * An unspecified event occurred for this subchannel. Adjust data according
+ * to the current operational state of the subchannel and device. Return
+ * zero when the event has been handled sufficiently or -EAGAIN when this
+ * function should be called again in process context.
+ */
+static int io_subchannel_sch_event(struct subchannel *sch, int process)
 {
-	int event, ret, disc;
 	unsigned long flags;
-	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE, DISC } action;
 	struct ccw_device *cdev;
+	struct ccw_dev_id dev_id;
+	enum io_sch_action action;
+	int rc = -EAGAIN;
 
 	spin_lock_irqsave(sch->lock, flags);
+	if (!device_is_registered(&sch->dev))
+		goto out_unlock;
+	if (work_pending(&sch->todo_work))
+		goto out_unlock;
 	cdev = sch_get_cdev(sch);
-	disc = device_is_disconnected(cdev);
-	if (disc && slow) {
-		/* Disconnected devices are evaluated directly only.*/
-		spin_unlock_irqrestore(sch->lock, flags);
-		return 0;
-	}
-	/* No interrupt after machine check - kill pending timers. */
-	if (cdev)
-		ccw_device_set_timeout(cdev, 0);
-	if (!disc && !slow) {
-		/* Non-disconnected devices are evaluated on the slow path. */
-		spin_unlock_irqrestore(sch->lock, flags);
-		return -EAGAIN;
+	if (cdev && work_pending(&cdev->private->todo_work))
+		goto out_unlock;
+	action = sch_get_action(sch);
+	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
+		      sch->schid.ssid, sch->schid.sch_no, process,
+		      action);
+	/* Perform immediate actions while holding the lock. */
+	switch (action) {
+	case IO_SCH_REPROBE:
+		/* Trigger device recognition. */
+		ccw_device_trigger_reprobe(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_VERIFY:
+		/* Trigger path verification. */
+		io_subchannel_verify(sch);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_DISC:
+		ccw_device_set_disconnected(cdev);
+		rc = 0;
+		goto out_unlock;
+	case IO_SCH_ORPH_UNREG:
+	case IO_SCH_ORPH_ATTACH:
+		ccw_device_set_disconnected(cdev);
+		break;
+	case IO_SCH_UNREG_ATTACH:
+	case IO_SCH_UNREG:
+		if (cdev)
+			ccw_device_set_notoper(cdev);
+		break;
+	case IO_SCH_NOP:
+		rc = 0;
+		goto out_unlock;
+	default:
+		break;
 	}
-	event = io_subchannel_get_status(sch);
-	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
-		      sch->schid.ssid, sch->schid.sch_no, event,
-		      disc ? "disconnected" : "normal",
-		      slow ? "slow" : "fast");
+	spin_unlock_irqrestore(sch->lock, flags);
+	/* All other actions require process context. */
+	if (!process)
+		goto out;
+	/* Handle attached ccw device. */
1661 /* Analyze subchannel status. */ 1441 switch (action) {
1662 action = NONE; 1442 case IO_SCH_ORPH_UNREG:
1663 switch (event) { 1443 case IO_SCH_ORPH_ATTACH:
1664 case CIO_NO_PATH: 1444 /* Move ccw device to orphanage. */
1665 if (disc) { 1445 rc = ccw_device_move_to_orph(cdev);
1666 /* Check if paths have become available. */ 1446 if (rc)
1667 action = REPROBE; 1447 goto out;
1668 break;
1669 }
1670 /* fall through */
1671 case CIO_GONE:
1672 /* Ask driver what to do with device. */
1673 if (io_subchannel_notify(sch, event))
1674 action = DISC;
1675 else
1676 action = UNREGISTER;
1677 break; 1448 break;
1678 case CIO_REVALIDATE: 1449 case IO_SCH_UNREG_ATTACH:
1679 /* Device will be removed, so no notify necessary. */ 1450 /* Unregister ccw device. */
1680 if (disc) 1451 ccw_device_unregister(cdev);
1681 /* Reprobe because immediate unregister might block. */
1682 action = REPROBE;
1683 else
1684 action = UNREGISTER_PROBE;
1685 break; 1452 break;
1686 case CIO_OPER: 1453 default:
1687 if (disc)
1688 /* Get device operational again. */
1689 action = REPROBE;
1690 break; 1454 break;
1691 } 1455 }
1692 /* Perform action. */ 1456 /* Handle subchannel. */
1693 ret = 0;
1694 switch (action) { 1457 switch (action) {
1695 case UNREGISTER: 1458 case IO_SCH_ORPH_UNREG:
1696 case UNREGISTER_PROBE: 1459 case IO_SCH_UNREG:
1697 ccw_device_set_notoper(cdev);
1698 /* Unregister device (will use subchannel lock). */
1699 spin_unlock_irqrestore(sch->lock, flags);
1700 css_sch_device_unregister(sch); 1460 css_sch_device_unregister(sch);
1701 spin_lock_irqsave(sch->lock, flags);
1702 break; 1461 break;
1703 case REPROBE: 1462 case IO_SCH_ORPH_ATTACH:
1463 case IO_SCH_UNREG_ATTACH:
1464 case IO_SCH_ATTACH:
1465 dev_id.ssid = sch->schid.ssid;
1466 dev_id.devno = sch->schib.pmcw.dev;
1467 cdev = get_ccwdev_by_dev_id(&dev_id);
1468 if (!cdev) {
1469 sch_create_and_recog_new_device(sch);
1470 break;
1471 }
1472 rc = ccw_device_move_to_sch(cdev, sch);
1473 if (rc) {
1474 /* Release reference from get_ccwdev_by_dev_id() */
1475 put_device(&cdev->dev);
1476 goto out;
1477 }
1478 spin_lock_irqsave(sch->lock, flags);
1704 ccw_device_trigger_reprobe(cdev); 1479 ccw_device_trigger_reprobe(cdev);
1705 break; 1480 spin_unlock_irqrestore(sch->lock, flags);
1706 case DISC: 1481 /* Release reference from get_ccwdev_by_dev_id() */
1707 ccw_device_set_disconnected(cdev); 1482 put_device(&cdev->dev);
1708 break; 1483 break;
1709 default: 1484 default:
1710 break; 1485 break;
1711 } 1486 }
1712 spin_unlock_irqrestore(sch->lock, flags); 1487 return 0;
1713 /* Probe if necessary. */
1714 if (action == UNREGISTER_PROBE)
1715 ret = css_probe_device(sch->schid);
1716 1488
1717 return ret; 1489out_unlock:
1490 spin_unlock_irqrestore(sch->lock, flags);
1491out:
1492 return rc;
1718} 1493}
1719 1494
1720#ifdef CONFIG_CCW_CONSOLE 1495#ifdef CONFIG_CCW_CONSOLE
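The -EAGAIN contract above is what splits event handling in two: a quick pass that may run in atomic context, and a deferred pass on the slow-path workqueue. A minimal sketch of the caller side, simplified from the css evaluation path (an assumption; css.c is not part of this section):

static void css_evaluate_sketch(struct subchannel *sch)
{
	int ret;

	ret = io_subchannel_sch_event(sch, 0);	/* quick, atomic pass */
	if (ret == -EAGAIN)
		css_schedule_eval(sch->schid);	/* redo with process=1 */
}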
@@ -1744,10 +1519,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1744 sch->driver = &io_subchannel_driver; 1519 sch->driver = &io_subchannel_driver;
1745 /* Initialize the ccw_device structure. */ 1520 /* Initialize the ccw_device structure. */
1746 cdev->dev.parent = &sch->dev; 1521 cdev->dev.parent = &sch->dev;
1747 rc = io_subchannel_recog(cdev, sch); 1522 io_subchannel_recog(cdev, sch);
1748 if (rc)
1749 return rc;
1750
1751 /* Now wait for the async. recognition to come to an end. */ 1523 /* Now wait for the async. recognition to come to an end. */
1752 spin_lock_irq(cdev->ccwlock); 1524 spin_lock_irq(cdev->ccwlock);
1753 while (!dev_fsm_final_state(cdev)) 1525 while (!dev_fsm_final_state(cdev))
@@ -1763,7 +1535,7 @@ static int ccw_device_console_enable(struct ccw_device *cdev,
1763 rc = 0; 1535 rc = 0;
1764out_unlock: 1536out_unlock:
1765 spin_unlock_irq(cdev->ccwlock); 1537 spin_unlock_irq(cdev->ccwlock);
1766 return 0; 1538 return rc;
1767} 1539}
1768 1540
1769struct ccw_device * 1541struct ccw_device *
@@ -1919,7 +1691,7 @@ static int ccw_device_pm_prepare(struct device *dev)
1919{ 1691{
1920 struct ccw_device *cdev = to_ccwdev(dev); 1692 struct ccw_device *cdev = to_ccwdev(dev);
1921 1693
1922 if (work_pending(&cdev->private->kick_work)) 1694 if (work_pending(&cdev->private->todo_work))
1923 return -EAGAIN; 1695 return -EAGAIN;
1924 /* Fail while device is being set online/offline. */ 1696 /* Fail while device is being set online/offline. */
1925 if (atomic_read(&cdev->private->onoff)) 1697 if (atomic_read(&cdev->private->onoff))
@@ -2005,7 +1777,6 @@ static int ccw_device_pm_thaw(struct device *dev)
2005static void __ccw_device_pm_restore(struct ccw_device *cdev) 1777static void __ccw_device_pm_restore(struct ccw_device *cdev)
2006{ 1778{
2007 struct subchannel *sch = to_subchannel(cdev->dev.parent); 1779 struct subchannel *sch = to_subchannel(cdev->dev.parent);
2008 int ret;
2009 1780
2010 if (cio_is_console(sch->schid)) 1781 if (cio_is_console(sch->schid))
2011 goto out; 1782 goto out;
@@ -2015,22 +1786,10 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
2015 */ 1786 */
2016 spin_lock_irq(sch->lock); 1787 spin_lock_irq(sch->lock);
2017 cdev->private->flags.resuming = 1; 1788 cdev->private->flags.resuming = 1;
2018 ret = ccw_device_recognition(cdev); 1789 ccw_device_recognition(cdev);
2019 spin_unlock_irq(sch->lock); 1790 spin_unlock_irq(sch->lock);
2020 if (ret) {
2021 CIO_MSG_EVENT(0, "Couldn't start recognition for device "
2022 "0.%x.%04x (ret=%d)\n",
2023 cdev->private->dev_id.ssid,
2024 cdev->private->dev_id.devno, ret);
2025 spin_lock_irq(sch->lock);
2026 cdev->private->state = DEV_STATE_DISCONNECTED;
2027 spin_unlock_irq(sch->lock);
2028 /* notify driver after the resume cb */
2029 goto out;
2030 }
2031 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || 1791 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
2032 cdev->private->state == DEV_STATE_DISCONNECTED); 1792 cdev->private->state == DEV_STATE_DISCONNECTED);
2033
2034out: 1793out:
2035 cdev->private->flags.resuming = 0; 1794 cdev->private->flags.resuming = 0;
2036} 1795}
@@ -2040,7 +1799,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
2040 cdev->private->state = DEV_STATE_BOXED; 1799 cdev->private->state = DEV_STATE_BOXED;
2041 if (ccw_device_notify(cdev, CIO_BOXED)) 1800 if (ccw_device_notify(cdev, CIO_BOXED))
2042 return 0; 1801 return 0;
2043 ccw_device_schedule_sch_unregister(cdev); 1802 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2044 return -ENODEV; 1803 return -ENODEV;
2045} 1804}
2046 1805
@@ -2049,7 +1808,7 @@ static int resume_handle_disc(struct ccw_device *cdev)
2049 cdev->private->state = DEV_STATE_DISCONNECTED; 1808 cdev->private->state = DEV_STATE_DISCONNECTED;
2050 if (ccw_device_notify(cdev, CIO_GONE)) 1809 if (ccw_device_notify(cdev, CIO_GONE))
2051 return 0; 1810 return 0;
2052 ccw_device_schedule_sch_unregister(cdev); 1811 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
2053 return -ENODEV; 1812 return -ENODEV;
2054} 1813}
2055 1814
@@ -2094,9 +1853,7 @@ static int ccw_device_pm_restore(struct device *dev)
2094 /* check if the device type has changed */ 1853 /* check if the device type has changed */
2095 if (!ccw_device_test_sense_data(cdev)) { 1854 if (!ccw_device_test_sense_data(cdev)) {
2096 ccw_device_update_sense_data(cdev); 1855 ccw_device_update_sense_data(cdev);
2097 PREPARE_WORK(&cdev->private->kick_work, 1856 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
2098 ccw_device_do_unbind_bind);
2099 queue_work(ccw_device_work, &cdev->private->kick_work);
2100 ret = -ENODEV; 1857 ret = -ENODEV;
2101 goto out_unlock; 1858 goto out_unlock;
2102 } 1859 }
@@ -2140,7 +1897,7 @@ out_disc_unlock:
2140 goto out_restore; 1897 goto out_restore;
2141 1898
2142out_unreg_unlock: 1899out_unreg_unlock:
2143 ccw_device_schedule_sch_unregister(cdev); 1900 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
2144 ret = -ENODEV; 1901 ret = -ENODEV;
2145out_unlock: 1902out_unlock:
2146 spin_unlock_irq(sch->lock); 1903 spin_unlock_irq(sch->lock);
@@ -2205,6 +1962,77 @@ ccw_device_get_subchannel_id(struct ccw_device *cdev)
2205 return sch->schid; 1962 return sch->schid;
2206} 1963}
2207 1964
1965static void ccw_device_todo(struct work_struct *work)
1966{
1967 struct ccw_device_private *priv;
1968 struct ccw_device *cdev;
1969 struct subchannel *sch;
1970 enum cdev_todo todo;
1971
1972 priv = container_of(work, struct ccw_device_private, todo_work);
1973 cdev = priv->cdev;
1974 sch = to_subchannel(cdev->dev.parent);
1975 /* Find out todo. */
1976 spin_lock_irq(cdev->ccwlock);
1977 todo = priv->todo;
1978 priv->todo = CDEV_TODO_NOTHING;
1979 CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
1980 priv->dev_id.ssid, priv->dev_id.devno, todo);
1981 spin_unlock_irq(cdev->ccwlock);
1982 /* Perform todo. */
1983 switch (todo) {
1984 case CDEV_TODO_ENABLE_CMF:
1985 cmf_reenable(cdev);
1986 break;
1987 case CDEV_TODO_REBIND:
1988 ccw_device_do_unbind_bind(cdev);
1989 break;
1990 case CDEV_TODO_REGISTER:
1991 io_subchannel_register(cdev);
1992 break;
1993 case CDEV_TODO_UNREG_EVAL:
1994 if (!sch_is_pseudo_sch(sch))
1995 css_schedule_eval(sch->schid);
1996 /* fall-through */
1997 case CDEV_TODO_UNREG:
1998 if (sch_is_pseudo_sch(sch))
1999 ccw_device_unregister(cdev);
2000 else
2001 ccw_device_call_sch_unregister(cdev);
2002 break;
2003 default:
2004 break;
2005 }
2006 /* Release workqueue ref. */
2007 put_device(&cdev->dev);
2008}
2009
2010/**
2011 * ccw_device_sched_todo - schedule ccw device operation
2012 * @cdev: ccw device
2013 * @todo: todo
2014 *
2015 * Schedule the operation identified by @todo to be performed on the slow path
2016 * workqueue. Do nothing if another operation with higher priority is already
2017 * scheduled. Needs to be called with ccwdev lock held.
2018 */
2019void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
2020{
2021 CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
2022 cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
2023 todo);
2024 if (cdev->private->todo >= todo)
2025 return;
2026 cdev->private->todo = todo;
2027 /* Get workqueue ref. */
2028 if (!get_device(&cdev->dev))
2029 return;
2030 if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
2031 /* Already queued, release workqueue ref. */
2032 put_device(&cdev->dev);
2033 }
2034}
2035
2208MODULE_LICENSE("GPL"); 2036MODULE_LICENSE("GPL");
2209EXPORT_SYMBOL(ccw_device_set_online); 2037EXPORT_SYMBOL(ccw_device_set_online);
2210EXPORT_SYMBOL(ccw_device_set_offline); 2038EXPORT_SYMBOL(ccw_device_set_offline);
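A usage sketch for the new todo mechanism defined above. Callers hold the ccwdev lock, and a lower-priority todo is silently dropped if a higher one is already pending (the relative ranking of the enum cdev_todo values is assumed from io_sch.h, which is not shown in this section):

/* Request a driver rebind; if CDEV_TODO_UNREG was already scheduled,
 * this call is a no-op because UNREG ranks higher. */
spin_lock_irq(cdev->ccwlock);
ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
spin_unlock_irq(cdev->ccwlock);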
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 246c6482842c..bcfe13e42638 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -21,7 +21,6 @@ enum dev_state {
21 DEV_STATE_DISBAND_PGID, 21 DEV_STATE_DISBAND_PGID,
22 DEV_STATE_BOXED, 22 DEV_STATE_BOXED,
23 /* states to wait for i/o completion before doing something */ 23 /* states to wait for i/o completion before doing something */
24 DEV_STATE_CLEAR_VERIFY,
25 DEV_STATE_TIMEOUT_KILL, 24 DEV_STATE_TIMEOUT_KILL,
26 DEV_STATE_QUIESCE, 25 DEV_STATE_QUIESCE,
27 /* special states for devices gone not operational */ 26 /* special states for devices gone not operational */
@@ -29,6 +28,7 @@ enum dev_state {
29 DEV_STATE_DISCONNECTED_SENSE_ID, 28 DEV_STATE_DISCONNECTED_SENSE_ID,
30 DEV_STATE_CMFCHANGE, 29 DEV_STATE_CMFCHANGE,
31 DEV_STATE_CMFUPDATE, 30 DEV_STATE_CMFUPDATE,
31 DEV_STATE_STEAL_LOCK,
32 /* last element! */ 32 /* last element! */
33 NR_DEV_STATES 33 NR_DEV_STATES
34}; 34};
@@ -81,17 +81,16 @@ void io_subchannel_init_config(struct subchannel *sch);
81 81
82int ccw_device_cancel_halt_clear(struct ccw_device *); 82int ccw_device_cancel_halt_clear(struct ccw_device *);
83 83
84void ccw_device_do_unbind_bind(struct work_struct *);
85void ccw_device_move_to_orphanage(struct work_struct *);
86int ccw_device_is_orphan(struct ccw_device *); 84int ccw_device_is_orphan(struct ccw_device *);
87 85
88int ccw_device_recognition(struct ccw_device *); 86void ccw_device_recognition(struct ccw_device *);
89int ccw_device_online(struct ccw_device *); 87int ccw_device_online(struct ccw_device *);
90int ccw_device_offline(struct ccw_device *); 88int ccw_device_offline(struct ccw_device *);
91void ccw_device_update_sense_data(struct ccw_device *); 89void ccw_device_update_sense_data(struct ccw_device *);
92int ccw_device_test_sense_data(struct ccw_device *); 90int ccw_device_test_sense_data(struct ccw_device *);
93void ccw_device_schedule_sch_unregister(struct ccw_device *); 91void ccw_device_schedule_sch_unregister(struct ccw_device *);
94int ccw_purge_blacklisted(void); 92int ccw_purge_blacklisted(void);
93void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
95 94
96/* Function prototypes for device status and basic sense stuff. */ 95/* Function prototypes for device status and basic sense stuff. */
97void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); 96void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
@@ -99,24 +98,28 @@ void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
99int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *); 98int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
100int ccw_device_do_sense(struct ccw_device *, struct irb *); 99int ccw_device_do_sense(struct ccw_device *, struct irb *);
101 100
101/* Function prototype for internal request handling. */
102int lpm_adjust(int lpm, int mask);
103void ccw_request_start(struct ccw_device *);
104int ccw_request_cancel(struct ccw_device *cdev);
105void ccw_request_handler(struct ccw_device *cdev);
106void ccw_request_timeout(struct ccw_device *cdev);
107void ccw_request_notoper(struct ccw_device *cdev);
108
102/* Function prototypes for sense id stuff. */ 109/* Function prototypes for sense id stuff. */
103void ccw_device_sense_id_start(struct ccw_device *); 110void ccw_device_sense_id_start(struct ccw_device *);
104void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event);
105void ccw_device_sense_id_done(struct ccw_device *, int); 111void ccw_device_sense_id_done(struct ccw_device *, int);
106 112
107/* Function prototypes for path grouping stuff. */ 113/* Function prototypes for path grouping stuff. */
108void ccw_device_sense_pgid_start(struct ccw_device *);
109void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event);
110void ccw_device_sense_pgid_done(struct ccw_device *, int);
111
112void ccw_device_verify_start(struct ccw_device *); 114void ccw_device_verify_start(struct ccw_device *);
113void ccw_device_verify_irq(struct ccw_device *, enum dev_event);
114void ccw_device_verify_done(struct ccw_device *, int); 115void ccw_device_verify_done(struct ccw_device *, int);
115 116
116void ccw_device_disband_start(struct ccw_device *); 117void ccw_device_disband_start(struct ccw_device *);
117void ccw_device_disband_irq(struct ccw_device *, enum dev_event);
118void ccw_device_disband_done(struct ccw_device *, int); 118void ccw_device_disband_done(struct ccw_device *, int);
119 119
120void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
121void ccw_device_stlck_done(struct ccw_device *, void *, int);
122
120int ccw_device_call_handler(struct ccw_device *); 123int ccw_device_call_handler(struct ccw_device *);
121 124
122int ccw_device_stlck(struct ccw_device *); 125int ccw_device_stlck(struct ccw_device *);
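The new ccw_request prototypes above replace the per-operation IRQ handlers that are deleted in this hunk. A minimal sketch of a starter, assuming struct ccw_request exposes the fields used by ccwreq.c (timeout, maxretries, lpm, check, callback, cp); my_op_check and my_op_done are hypothetical names:

static void my_op_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->timeout	= 10 * HZ;	/* per-try timeout */
	req->maxretries	= 5;		/* tries per path */
	req->lpm	= 0x80;		/* start on the first path */
	req->check	= my_op_check;	/* classify interrupt results */
	req->callback	= my_op_done;	/* final completion hook */
	req->cp		= cdev->private->iccws;	/* channel program */
	ccw_request_start(cdev);
}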
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index b9613d7df9ef..ae760658a131 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -229,8 +229,8 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
229 229
230 sch = to_subchannel(cdev->dev.parent); 230 sch = to_subchannel(cdev->dev.parent);
231 231
232 ccw_device_set_timeout(cdev, 0); 232 if (cio_disable_subchannel(sch))
233 cio_disable_subchannel(sch); 233 state = DEV_STATE_NOT_OPER;
234 /* 234 /*
235 * Now that we tried recognition, we have performed device selection 235 * Now that we tried recognition, we have performed device selection
236 * through ssch() and the path information is up to date. 236 * through ssch() and the path information is up to date.
@@ -263,22 +263,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
263 } 263 }
264 switch (state) { 264 switch (state) {
265 case DEV_STATE_NOT_OPER: 265 case DEV_STATE_NOT_OPER:
266 CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
267 "subchannel 0.%x.%04x\n",
268 cdev->private->dev_id.devno,
269 sch->schid.ssid, sch->schid.sch_no);
270 break; 266 break;
271 case DEV_STATE_OFFLINE: 267 case DEV_STATE_OFFLINE:
272 if (!cdev->online) { 268 if (!cdev->online) {
273 ccw_device_update_sense_data(cdev); 269 ccw_device_update_sense_data(cdev);
274 /* Issue device info message. */
275 CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
276 "CU Type/Mod = %04X/%02X, Dev Type/Mod "
277 "= %04X/%02X\n",
278 cdev->private->dev_id.ssid,
279 cdev->private->dev_id.devno,
280 cdev->id.cu_type, cdev->id.cu_model,
281 cdev->id.dev_type, cdev->id.dev_model);
282 break; 270 break;
283 } 271 }
284 cdev->private->state = DEV_STATE_OFFLINE; 272 cdev->private->state = DEV_STATE_OFFLINE;
@@ -289,16 +277,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
289 wake_up(&cdev->private->wait_q); 277 wake_up(&cdev->private->wait_q);
290 } else { 278 } else {
291 ccw_device_update_sense_data(cdev); 279 ccw_device_update_sense_data(cdev);
292 PREPARE_WORK(&cdev->private->kick_work, 280 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
293 ccw_device_do_unbind_bind);
294 queue_work(ccw_device_work, &cdev->private->kick_work);
295 } 281 }
296 return; 282 return;
297 case DEV_STATE_BOXED: 283 case DEV_STATE_BOXED:
298 CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
299 " subchannel 0.%x.%04x\n",
300 cdev->private->dev_id.devno,
301 sch->schid.ssid, sch->schid.sch_no);
302 if (cdev->id.cu_type != 0) { /* device was recognized before */ 284 if (cdev->id.cu_type != 0) { /* device was recognized before */
303 cdev->private->flags.recog_done = 1; 285 cdev->private->flags.recog_done = 1;
304 cdev->private->state = DEV_STATE_BOXED; 286 cdev->private->state = DEV_STATE_BOXED;
@@ -343,28 +325,16 @@ int ccw_device_notify(struct ccw_device *cdev, int event)
343 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 325 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
344} 326}
345 327
346static void cmf_reenable_delayed(struct work_struct *work)
347{
348 struct ccw_device_private *priv;
349 struct ccw_device *cdev;
350
351 priv = container_of(work, struct ccw_device_private, kick_work);
352 cdev = priv->cdev;
353 cmf_reenable(cdev);
354}
355
356static void ccw_device_oper_notify(struct ccw_device *cdev) 328static void ccw_device_oper_notify(struct ccw_device *cdev)
357{ 329{
358 if (ccw_device_notify(cdev, CIO_OPER)) { 330 if (ccw_device_notify(cdev, CIO_OPER)) {
359 /* Reenable channel measurements, if needed. */ 331 /* Reenable channel measurements, if needed. */
360 PREPARE_WORK(&cdev->private->kick_work, cmf_reenable_delayed); 332 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
361 queue_work(ccw_device_work, &cdev->private->kick_work);
362 return; 333 return;
363 } 334 }
364 /* Driver doesn't want device back. */ 335 /* Driver doesn't want device back. */
365 ccw_device_set_notoper(cdev); 336 ccw_device_set_notoper(cdev);
366 PREPARE_WORK(&cdev->private->kick_work, ccw_device_do_unbind_bind); 337 ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
367 queue_work(ccw_device_work, &cdev->private->kick_work);
368} 338}
369 339
370/* 340/*
@@ -392,14 +362,14 @@ ccw_device_done(struct ccw_device *cdev, int state)
392 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n", 362 CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
393 cdev->private->dev_id.devno, sch->schid.sch_no); 363 cdev->private->dev_id.devno, sch->schid.sch_no);
394 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED)) 364 if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
395 ccw_device_schedule_sch_unregister(cdev); 365 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
396 cdev->private->flags.donotify = 0; 366 cdev->private->flags.donotify = 0;
397 break; 367 break;
398 case DEV_STATE_NOT_OPER: 368 case DEV_STATE_NOT_OPER:
399 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n", 369 CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
400 cdev->private->dev_id.devno, sch->schid.sch_no); 370 cdev->private->dev_id.devno, sch->schid.sch_no);
401 if (!ccw_device_notify(cdev, CIO_GONE)) 371 if (!ccw_device_notify(cdev, CIO_GONE))
402 ccw_device_schedule_sch_unregister(cdev); 372 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
403 else 373 else
404 ccw_device_set_disconnected(cdev); 374 ccw_device_set_disconnected(cdev);
405 cdev->private->flags.donotify = 0; 375 cdev->private->flags.donotify = 0;
@@ -409,7 +379,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
409 "%04x\n", cdev->private->dev_id.devno, 379 "%04x\n", cdev->private->dev_id.devno,
410 sch->schid.sch_no); 380 sch->schid.sch_no);
411 if (!ccw_device_notify(cdev, CIO_NO_PATH)) 381 if (!ccw_device_notify(cdev, CIO_NO_PATH))
412 ccw_device_schedule_sch_unregister(cdev); 382 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
413 else 383 else
414 ccw_device_set_disconnected(cdev); 384 ccw_device_set_disconnected(cdev);
415 cdev->private->flags.donotify = 0; 385 cdev->private->flags.donotify = 0;
@@ -425,107 +395,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
425 wake_up(&cdev->private->wait_q); 395 wake_up(&cdev->private->wait_q);
426} 396}
427 397
428static int cmp_pgid(struct pgid *p1, struct pgid *p2)
429{
430 char *c1;
431 char *c2;
432
433 c1 = (char *)p1;
434 c2 = (char *)p2;
435
436 return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
437}
438
439static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
440{
441 int i;
442 int last;
443
444 last = 0;
445 for (i = 0; i < 8; i++) {
446 if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
447 /* No PGID yet */
448 continue;
449 if (cdev->private->pgid[last].inf.ps.state1 ==
450 SNID_STATE1_RESET) {
451 /* First non-zero PGID */
452 last = i;
453 continue;
454 }
455 if (cmp_pgid(&cdev->private->pgid[i],
456 &cdev->private->pgid[last]) == 0)
457 /* Non-conflicting PGIDs */
458 continue;
459
460 /* PGID mismatch, can't pathgroup. */
461 CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
462 "0.%x.%04x, can't pathgroup\n",
463 cdev->private->dev_id.ssid,
464 cdev->private->dev_id.devno);
465 cdev->private->options.pgroup = 0;
466 return;
467 }
468 if (cdev->private->pgid[last].inf.ps.state1 ==
469 SNID_STATE1_RESET)
470 /* No previous pgid found */
471 memcpy(&cdev->private->pgid[0],
472 &channel_subsystems[0]->global_pgid,
473 sizeof(struct pgid));
474 else
475 /* Use existing pgid */
476 memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
477 sizeof(struct pgid));
478}
479
480/*
481 * Function called from device_pgid.c after sense path group ID has completed.
482 */
483void
484ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
485{
486 struct subchannel *sch;
487
488 sch = to_subchannel(cdev->dev.parent);
489 switch (err) {
490 case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
491 cdev->private->options.pgroup = 0;
492 break;
493 case 0: /* success */
494 case -EACCES: /* partial success, some paths not operational */
495 /* Check if all pgids are equal or 0. */
496 __ccw_device_get_common_pgid(cdev);
497 break;
498 case -ETIME: /* Sense path group id stopped by timeout. */
499 case -EUSERS: /* device is reserved for someone else. */
500 ccw_device_done(cdev, DEV_STATE_BOXED);
501 return;
502 default:
503 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
504 return;
505 }
506 /* Start Path Group verification. */
507 cdev->private->state = DEV_STATE_VERIFY;
508 cdev->private->flags.doverify = 0;
509 ccw_device_verify_start(cdev);
510}
511
512/* 398/*
513 * Start device recognition. 399 * Start device recognition.
514 */ 400 */
515int 401void ccw_device_recognition(struct ccw_device *cdev)
516ccw_device_recognition(struct ccw_device *cdev)
517{ 402{
518 struct subchannel *sch; 403 struct subchannel *sch = to_subchannel(cdev->dev.parent);
519 int ret;
520
521 sch = to_subchannel(cdev->dev.parent);
522 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
523 if (ret != 0)
524 /* Couldn't enable the subchannel for i/o. Sick device. */
525 return ret;
526
527 /* After 60s the device recognition is considered to have failed. */
528 ccw_device_set_timeout(cdev, 60*HZ);
529 404
530 /* 405 /*
531 * We used to start here with a sense pgid to find out whether a device 406 * We used to start here with a sense pgid to find out whether a device
@@ -537,32 +412,33 @@ ccw_device_recognition(struct ccw_device *cdev)
537 */ 412 */
538 cdev->private->flags.recog_done = 0; 413 cdev->private->flags.recog_done = 0;
539 cdev->private->state = DEV_STATE_SENSE_ID; 414 cdev->private->state = DEV_STATE_SENSE_ID;
415 if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
416 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
417 return;
418 }
540 ccw_device_sense_id_start(cdev); 419 ccw_device_sense_id_start(cdev);
541 return 0;
542} 420}
543 421
544/* 422/*
545 * Handle timeout in device recognition. 423 * Handle events for states that use the ccw request infrastructure.
546 */ 424 */
547static void 425static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
548ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
549{ 426{
550 int ret; 427 switch (e) {
551 428 case DEV_EVENT_NOTOPER:
552 ret = ccw_device_cancel_halt_clear(cdev); 429 ccw_request_notoper(cdev);
553 switch (ret) {
554 case 0:
555 ccw_device_recog_done(cdev, DEV_STATE_BOXED);
556 break; 430 break;
557 case -ENODEV: 431 case DEV_EVENT_INTERRUPT:
558 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); 432 ccw_request_handler(cdev);
433 break;
434 case DEV_EVENT_TIMEOUT:
435 ccw_request_timeout(cdev);
559 break; 436 break;
560 default: 437 default:
561 ccw_device_set_timeout(cdev, 3*HZ); 438 break;
562 } 439 }
563} 440}
564 441
565
566void 442void
567ccw_device_verify_done(struct ccw_device *cdev, int err) 443ccw_device_verify_done(struct ccw_device *cdev, int err)
568{ 444{
@@ -571,21 +447,18 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
571 sch = to_subchannel(cdev->dev.parent); 447 sch = to_subchannel(cdev->dev.parent);
572 /* Update schib - pom may have changed. */ 448 /* Update schib - pom may have changed. */
573 if (cio_update_schib(sch)) { 449 if (cio_update_schib(sch)) {
574 cdev->private->flags.donotify = 0; 450 err = -ENODEV;
575 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 451 goto callback;
576 return;
577 } 452 }
578 /* Update lpm with verified path mask. */ 453 /* Update lpm with verified path mask. */
579 sch->lpm = sch->vpm; 454 sch->lpm = sch->vpm;
580 /* Repeat path verification? */ 455 /* Repeat path verification? */
581 if (cdev->private->flags.doverify) { 456 if (cdev->private->flags.doverify) {
582 cdev->private->flags.doverify = 0;
583 ccw_device_verify_start(cdev); 457 ccw_device_verify_start(cdev);
584 return; 458 return;
585 } 459 }
460callback:
586 switch (err) { 461 switch (err) {
587 case -EOPNOTSUPP: /* path grouping not supported, just set online. */
588 cdev->private->options.pgroup = 0;
589 case 0: 462 case 0:
590 ccw_device_done(cdev, DEV_STATE_ONLINE); 463 ccw_device_done(cdev, DEV_STATE_ONLINE);
591 /* Deliver fake irb to device driver, if needed. */ 464 /* Deliver fake irb to device driver, if needed. */
@@ -604,18 +477,20 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
604 } 477 }
605 break; 478 break;
606 case -ETIME: 479 case -ETIME:
480 case -EUSERS:
607 /* Reset oper notify indication after verify error. */ 481 /* Reset oper notify indication after verify error. */
608 cdev->private->flags.donotify = 0; 482 cdev->private->flags.donotify = 0;
609 ccw_device_done(cdev, DEV_STATE_BOXED); 483 ccw_device_done(cdev, DEV_STATE_BOXED);
610 break; 484 break;
485 case -EACCES:
486 /* Reset oper notify indication after verify error. */
487 cdev->private->flags.donotify = 0;
488 ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
489 break;
611 default: 490 default:
612 /* Reset oper notify indication after verify error. */ 491 /* Reset oper notify indication after verify error. */
613 cdev->private->flags.donotify = 0; 492 cdev->private->flags.donotify = 0;
614 if (cdev->online) { 493 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
615 ccw_device_set_timeout(cdev, 0);
616 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
617 } else
618 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
619 break; 494 break;
620 } 495 }
621} 496}
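ccw_device_verify_done() folds the verified path mask back into sch->lpm. All of these masks are 8-bit vectors with one bit per channel path, 0x80 denoting the first path. A worked example (the concrete values are illustrative):

/*
 * pim = 0xe0  paths 0-2 installed
 * pam = 0xc0  paths 0-1 currently available
 * vpm = 0x40  only path 1 answered during verification
 *
 * sch->lpm = sch->vpm  ->  subsequent I/O uses path 1 only, until the
 * next verification widens the mask again.
 */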
@@ -640,17 +515,9 @@ ccw_device_online(struct ccw_device *cdev)
640 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 515 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
641 return ret; 516 return ret;
642 } 517 }
643 /* Do we want to do path grouping? */ 518 /* Start initial path verification. */
644 if (!cdev->private->options.pgroup) { 519 cdev->private->state = DEV_STATE_VERIFY;
645 /* Start initial path verification. */ 520 ccw_device_verify_start(cdev);
646 cdev->private->state = DEV_STATE_VERIFY;
647 cdev->private->flags.doverify = 0;
648 ccw_device_verify_start(cdev);
649 return 0;
650 }
651 /* Do a SensePGID first. */
652 cdev->private->state = DEV_STATE_SENSE_PGID;
653 ccw_device_sense_pgid_start(cdev);
654 return 0; 521 return 0;
655} 522}
656 523
@@ -666,7 +533,6 @@ ccw_device_disband_done(struct ccw_device *cdev, int err)
666 break; 533 break;
667 default: 534 default:
668 cdev->private->flags.donotify = 0; 535 cdev->private->flags.donotify = 0;
669 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
670 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 536 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
671 break; 537 break;
672 } 538 }
@@ -703,7 +569,7 @@ ccw_device_offline(struct ccw_device *cdev)
703 if (cdev->private->state != DEV_STATE_ONLINE) 569 if (cdev->private->state != DEV_STATE_ONLINE)
704 return -EINVAL; 570 return -EINVAL;
705 /* Are we doing path grouping? */ 571 /* Are we doing path grouping? */
706 if (!cdev->private->options.pgroup) { 572 if (!cdev->private->flags.pgroup) {
707 /* No, set state offline immediately. */ 573 /* No, set state offline immediately. */
708 ccw_device_done(cdev, DEV_STATE_OFFLINE); 574 ccw_device_done(cdev, DEV_STATE_OFFLINE);
709 return 0; 575 return 0;
@@ -715,43 +581,13 @@ ccw_device_offline(struct ccw_device *cdev)
715} 581}
716 582
717/* 583/*
718 * Handle timeout in device online/offline process.
719 */
720static void
721ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
722{
723 int ret;
724
725 ret = ccw_device_cancel_halt_clear(cdev);
726 switch (ret) {
727 case 0:
728 ccw_device_done(cdev, DEV_STATE_BOXED);
729 break;
730 case -ENODEV:
731 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
732 break;
733 default:
734 ccw_device_set_timeout(cdev, 3*HZ);
735 }
736}
737
738/*
739 * Handle not oper event in device recognition.
740 */
741static void
742ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
743{
744 ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
745}
746
747/*
748 * Handle not operational event in non-special state. 584 * Handle not operational event in non-special state.
749 */ 585 */
750static void ccw_device_generic_notoper(struct ccw_device *cdev, 586static void ccw_device_generic_notoper(struct ccw_device *cdev,
751 enum dev_event dev_event) 587 enum dev_event dev_event)
752{ 588{
753 if (!ccw_device_notify(cdev, CIO_GONE)) 589 if (!ccw_device_notify(cdev, CIO_GONE))
754 ccw_device_schedule_sch_unregister(cdev); 590 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
755 else 591 else
756 ccw_device_set_disconnected(cdev); 592 ccw_device_set_disconnected(cdev);
757} 593}
@@ -802,11 +638,27 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
802 } 638 }
803 /* Device is idle, we can do the path verification. */ 639 /* Device is idle, we can do the path verification. */
804 cdev->private->state = DEV_STATE_VERIFY; 640 cdev->private->state = DEV_STATE_VERIFY;
805 cdev->private->flags.doverify = 0;
806 ccw_device_verify_start(cdev); 641 ccw_device_verify_start(cdev);
807} 642}
808 643
809/* 644/*
645 * Handle path verification event in boxed state.
646 */
647static void ccw_device_boxed_verify(struct ccw_device *cdev,
648 enum dev_event dev_event)
649{
650 struct subchannel *sch = to_subchannel(cdev->dev.parent);
651
652 if (cdev->online) {
653 if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
654 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
655 else
656 ccw_device_online_verify(cdev, dev_event);
657 } else
658 css_schedule_eval(sch->schid);
659}
660
661/*
810 * Got an interrupt for a normal io (state online). 662 * Got an interrupt for a normal io (state online).
811 */ 663 */
812static void 664static void
@@ -904,12 +756,6 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
904 */ 756 */
905 if (scsw_fctl(&irb->scsw) & 757 if (scsw_fctl(&irb->scsw) &
906 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) { 758 (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
907 /* Retry Basic Sense if requested. */
908 if (cdev->private->flags.intretry) {
909 cdev->private->flags.intretry = 0;
910 ccw_device_do_sense(cdev, irb);
911 return;
912 }
913 cdev->private->flags.dosense = 0; 759 cdev->private->flags.dosense = 0;
914 memset(&cdev->private->irb, 0, sizeof(struct irb)); 760 memset(&cdev->private->irb, 0, sizeof(struct irb));
915 ccw_device_accumulate_irb(cdev, irb); 761 ccw_device_accumulate_irb(cdev, irb);
@@ -933,21 +779,6 @@ call_handler:
933} 779}
934 780
935static void 781static void
936ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
937{
938 struct irb *irb;
939
940 irb = (struct irb *) __LC_IRB;
941 /* Accumulate status. We don't do basic sense. */
942 ccw_device_accumulate_irb(cdev, irb);
943 /* Remember to clear irb to avoid residuals. */
944 memset(&cdev->private->irb, 0, sizeof(struct irb));
945 /* Try to start delayed device verification. */
946 ccw_device_online_verify(cdev, 0);
947 /* Note: Don't call handler for cio initiated clear! */
948}
949
950static void
951ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 782ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
952{ 783{
953 struct subchannel *sch; 784 struct subchannel *sch;
@@ -1004,32 +835,6 @@ ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
1004} 835}
1005 836
1006static void 837static void
1007ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
1008{
1009 struct irb *irb;
1010
1011 switch (dev_event) {
1012 case DEV_EVENT_INTERRUPT:
1013 irb = (struct irb *) __LC_IRB;
1014 /* Check for unsolicited interrupt. */
1015 if ((scsw_stctl(&irb->scsw) ==
1016 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
1017 (!scsw_cc(&irb->scsw)))
1018 /* FIXME: we should restart stlck here, but this
1019 * is extremely unlikely ... */
1020 goto out_wakeup;
1021
1022 ccw_device_accumulate_irb(cdev, irb);
1023 /* We don't care about basic sense etc. */
1024 break;
1025 default: /* timeout */
1026 break;
1027 }
1028out_wakeup:
1029 wake_up(&cdev->private->wait_q);
1030}
1031
1032static void
1033ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) 838ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1034{ 839{
1035 struct subchannel *sch; 840 struct subchannel *sch;
@@ -1038,10 +843,6 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
1038 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) 843 if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
1039 /* Couldn't enable the subchannel for i/o. Sick device. */ 844 /* Couldn't enable the subchannel for i/o. Sick device. */
1040 return; 845 return;
1041
1042 /* After 60s the device recognition is considered to have failed. */
1043 ccw_device_set_timeout(cdev, 60*HZ);
1044
1045 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID; 846 cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
1046 ccw_device_sense_id_start(cdev); 847 ccw_device_sense_id_start(cdev);
1047} 848}
@@ -1072,22 +873,20 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev)
1072 873
1073 /* We should also update ssd info, but this has to wait. */ 874
1074 /* Check if this is another device which appeared on the same sch. */ 875 /* Check if this is another device which appeared on the same sch. */
1075 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { 876 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
1076 PREPARE_WORK(&cdev->private->kick_work, 877 css_schedule_eval(sch->schid);
1077 ccw_device_move_to_orphanage); 878 else
1078 queue_work(slow_path_wq, &cdev->private->kick_work);
1079 } else
1080 ccw_device_start_id(cdev, 0); 879 ccw_device_start_id(cdev, 0);
1081} 880}
1082 881
1083static void 882static void ccw_device_disabled_irq(struct ccw_device *cdev,
1084ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event) 883 enum dev_event dev_event)
1085{ 884{
1086 struct subchannel *sch; 885 struct subchannel *sch;
1087 886
1088 sch = to_subchannel(cdev->dev.parent); 887 sch = to_subchannel(cdev->dev.parent);
1089 /* 888 /*
1090 * An interrupt in state offline means a previous disable was not 889 * An interrupt in a disabled state means a previous disable was not
1091 * successful - should not happen, but we try to disable again. 890 * successful - should not happen, but we try to disable again.
1092 */ 891 */
1093 cio_disable_subchannel(sch); 892 cio_disable_subchannel(sch);
@@ -1113,10 +912,7 @@ static void
1113ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) 912ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1114{ 913{
1115 ccw_device_set_timeout(cdev, 0); 914 ccw_device_set_timeout(cdev, 0);
1116 if (dev_event == DEV_EVENT_NOTOPER) 915 cdev->private->state = DEV_STATE_NOT_OPER;
1117 cdev->private->state = DEV_STATE_NOT_OPER;
1118 else
1119 cdev->private->state = DEV_STATE_OFFLINE;
1120 wake_up(&cdev->private->wait_q); 916 wake_up(&cdev->private->wait_q);
1121} 917}
1122 918
@@ -1126,17 +922,11 @@ ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1126 int ret; 922 int ret;
1127 923
1128 ret = ccw_device_cancel_halt_clear(cdev); 924 ret = ccw_device_cancel_halt_clear(cdev);
1129 switch (ret) { 925 if (ret == -EBUSY) {
1130 case 0: 926 ccw_device_set_timeout(cdev, HZ/10);
1131 cdev->private->state = DEV_STATE_OFFLINE; 927 } else {
1132 wake_up(&cdev->private->wait_q);
1133 break;
1134 case -ENODEV:
1135 cdev->private->state = DEV_STATE_NOT_OPER; 928 cdev->private->state = DEV_STATE_NOT_OPER;
1136 wake_up(&cdev->private->wait_q); 929 wake_up(&cdev->private->wait_q);
1137 break;
1138 default:
1139 ccw_device_set_timeout(cdev, HZ/10);
1140 } 930 }
1141} 931}
1142 932
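The HZ/10 retry above relies on the device timer feeding back into the state machine. A hedged sketch of that plumbing (an assumption; the real timer callback lives elsewhere in device_fsm.c):

/* When the timer armed by ccw_device_set_timeout() fires, it raises
 * DEV_EVENT_TIMEOUT under the ccwlock, which for DEV_STATE_QUIESCE
 * dispatches right back to ccw_device_quiesce_timeout() above. */
static void ccw_device_timeout_sketch(unsigned long data)
{
	struct ccw_device *cdev = (struct ccw_device *) data;

	spin_lock_irq(cdev->ccwlock);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}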
@@ -1150,50 +940,37 @@ ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1150} 940}
1151 941
1152/* 942/*
1153 * Bug operation action.
1154 */
1155static void
1156ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1157{
1158 CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
1159 "0.%x.%04x\n", cdev->private->state, dev_event,
1160 cdev->private->dev_id.ssid,
1161 cdev->private->dev_id.devno);
1162 BUG();
1163}
1164
1165/*
1166 * device statemachine 943 * device statemachine
1167 */ 944 */
1168fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { 945fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1169 [DEV_STATE_NOT_OPER] = { 946 [DEV_STATE_NOT_OPER] = {
1170 [DEV_EVENT_NOTOPER] = ccw_device_nop, 947 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1171 [DEV_EVENT_INTERRUPT] = ccw_device_bug, 948 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1172 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 949 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1173 [DEV_EVENT_VERIFY] = ccw_device_nop, 950 [DEV_EVENT_VERIFY] = ccw_device_nop,
1174 }, 951 },
1175 [DEV_STATE_SENSE_PGID] = { 952 [DEV_STATE_SENSE_PGID] = {
1176 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 953 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1177 [DEV_EVENT_INTERRUPT] = ccw_device_sense_pgid_irq, 954 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1178 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 955 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1179 [DEV_EVENT_VERIFY] = ccw_device_nop, 956 [DEV_EVENT_VERIFY] = ccw_device_nop,
1180 }, 957 },
1181 [DEV_STATE_SENSE_ID] = { 958 [DEV_STATE_SENSE_ID] = {
1182 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 959 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1183 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 960 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1184 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 961 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1185 [DEV_EVENT_VERIFY] = ccw_device_nop, 962 [DEV_EVENT_VERIFY] = ccw_device_nop,
1186 }, 963 },
1187 [DEV_STATE_OFFLINE] = { 964 [DEV_STATE_OFFLINE] = {
1188 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 965 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1189 [DEV_EVENT_INTERRUPT] = ccw_device_offline_irq, 966 [DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
1190 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 967 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1191 [DEV_EVENT_VERIFY] = ccw_device_offline_verify, 968 [DEV_EVENT_VERIFY] = ccw_device_offline_verify,
1192 }, 969 },
1193 [DEV_STATE_VERIFY] = { 970 [DEV_STATE_VERIFY] = {
1194 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 971 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1195 [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, 972 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1196 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 973 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1197 [DEV_EVENT_VERIFY] = ccw_device_delay_verify, 974 [DEV_EVENT_VERIFY] = ccw_device_delay_verify,
1198 }, 975 },
1199 [DEV_STATE_ONLINE] = { 976 [DEV_STATE_ONLINE] = {
@@ -1209,24 +986,18 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1209 [DEV_EVENT_VERIFY] = ccw_device_online_verify, 986 [DEV_EVENT_VERIFY] = ccw_device_online_verify,
1210 }, 987 },
1211 [DEV_STATE_DISBAND_PGID] = { 988 [DEV_STATE_DISBAND_PGID] = {
1212 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 989 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1213 [DEV_EVENT_INTERRUPT] = ccw_device_disband_irq, 990 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1214 [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, 991 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1215 [DEV_EVENT_VERIFY] = ccw_device_nop, 992 [DEV_EVENT_VERIFY] = ccw_device_nop,
1216 }, 993 },
1217 [DEV_STATE_BOXED] = { 994 [DEV_STATE_BOXED] = {
1218 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 995 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1219 [DEV_EVENT_INTERRUPT] = ccw_device_stlck_done, 996 [DEV_EVENT_INTERRUPT] = ccw_device_nop,
1220 [DEV_EVENT_TIMEOUT] = ccw_device_stlck_done,
1221 [DEV_EVENT_VERIFY] = ccw_device_nop,
1222 },
1223 /* states to wait for i/o completion before doing something */
1224 [DEV_STATE_CLEAR_VERIFY] = {
1225 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1226 [DEV_EVENT_INTERRUPT] = ccw_device_clear_verify,
1227 [DEV_EVENT_TIMEOUT] = ccw_device_nop, 997 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1228 [DEV_EVENT_VERIFY] = ccw_device_nop, 998 [DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
1229 }, 999 },
1000 /* states to wait for i/o completion before doing something */
1230 [DEV_STATE_TIMEOUT_KILL] = { 1001 [DEV_STATE_TIMEOUT_KILL] = {
1231 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper, 1002 [DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
1232 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq, 1003 [DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
@@ -1243,13 +1014,13 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1243 [DEV_STATE_DISCONNECTED] = { 1014 [DEV_STATE_DISCONNECTED] = {
1244 [DEV_EVENT_NOTOPER] = ccw_device_nop, 1015 [DEV_EVENT_NOTOPER] = ccw_device_nop,
1245 [DEV_EVENT_INTERRUPT] = ccw_device_start_id, 1016 [DEV_EVENT_INTERRUPT] = ccw_device_start_id,
1246 [DEV_EVENT_TIMEOUT] = ccw_device_bug, 1017 [DEV_EVENT_TIMEOUT] = ccw_device_nop,
1247 [DEV_EVENT_VERIFY] = ccw_device_start_id, 1018 [DEV_EVENT_VERIFY] = ccw_device_start_id,
1248 }, 1019 },
1249 [DEV_STATE_DISCONNECTED_SENSE_ID] = { 1020 [DEV_STATE_DISCONNECTED_SENSE_ID] = {
1250 [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, 1021 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1251 [DEV_EVENT_INTERRUPT] = ccw_device_sense_id_irq, 1022 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1252 [DEV_EVENT_TIMEOUT] = ccw_device_recog_timeout, 1023 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1253 [DEV_EVENT_VERIFY] = ccw_device_nop, 1024 [DEV_EVENT_VERIFY] = ccw_device_nop,
1254 }, 1025 },
1255 [DEV_STATE_CMFCHANGE] = { 1026 [DEV_STATE_CMFCHANGE] = {
@@ -1264,6 +1035,12 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1264 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock, 1035 [DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
1265 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock, 1036 [DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
1266 }, 1037 },
1038 [DEV_STATE_STEAL_LOCK] = {
1039 [DEV_EVENT_NOTOPER] = ccw_device_request_event,
1040 [DEV_EVENT_INTERRUPT] = ccw_device_request_event,
1041 [DEV_EVENT_TIMEOUT] = ccw_device_request_event,
1042 [DEV_EVENT_VERIFY] = ccw_device_nop,
1043 },
1267}; 1044};
1268 1045
1269EXPORT_SYMBOL_GPL(ccw_device_set_timeout); 1046EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
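The dev_jumptable above is driven by a one-line dispatcher that indexes by current state and incoming event, which is why every cell must be populated. Essentially (sketched from how dev_fsm_event() is used in this file):

static inline void dev_fsm_event(struct ccw_device *cdev,
				 enum dev_event dev_event)
{
	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}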
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 1bdaa614e34f..78a0b43862c5 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,40 +1,39 @@
1/* 1/*
2 * drivers/s390/cio/device_id.c 2 * CCW device SENSE ID I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Sense ID functions.
10 */ 8 */
11 9
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/kernel.h> 10#include <linux/kernel.h>
15 11#include <linux/string.h>
12#include <linux/types.h>
13#include <linux/errno.h>
16#include <asm/ccwdev.h> 14#include <asm/ccwdev.h>
17#include <asm/delay.h> 15#include <asm/setup.h>
18#include <asm/cio.h> 16#include <asm/cio.h>
19#include <asm/lowcore.h>
20#include <asm/diag.h> 17#include <asm/diag.h>
21 18
22#include "cio.h" 19#include "cio.h"
23#include "cio_debug.h" 20#include "cio_debug.h"
24#include "css.h"
25#include "device.h" 21#include "device.h"
26#include "ioasm.h"
27#include "io_sch.h" 22#include "io_sch.h"
28 23
24#define SENSE_ID_RETRIES 256
25#define SENSE_ID_TIMEOUT (10 * HZ)
26#define SENSE_ID_MIN_LEN 4
27#define SENSE_ID_BASIC_LEN 7
28
29/** 29/**
30 * vm_vdev_to_cu_type - Convert vm virtual device into control unit type 30 * diag210_to_senseid - convert diag 0x210 data to sense id information
31 * for certain devices. 31 * @senseid: sense id
32 * @class: virtual device class 32 * @diag: diag 0x210 data
33 * @type: virtual device type
34 * 33 *
35 * Returns control unit type if a match was made or %0xffff otherwise. 34 * Return 0 on success, non-zero otherwise.
36 */ 35 */
37static int vm_vdev_to_cu_type(int class, int type) 36static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
38{ 37{
39 static struct { 38 static struct {
40 int class, type, cu_type; 39 int class, type, cu_type;
@@ -71,253 +70,153 @@ static int vm_vdev_to_cu_type(int class, int type)
71 }; 70 };
72 int i; 71 int i;
73 72
74 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) 73 /* Special case for osa devices. */
75 if (class == vm_devices[i].class && type == vm_devices[i].type) 74 if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
76 return vm_devices[i].cu_type; 75 senseid->cu_type = 0x3088;
76 senseid->cu_model = 0x60;
77 senseid->reserved = 0xff;
78 return 0;
79 }
80 for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
81 if (diag->vrdcvcla == vm_devices[i].class &&
82 diag->vrdcvtyp == vm_devices[i].type) {
83 senseid->cu_type = vm_devices[i].cu_type;
84 senseid->reserved = 0xff;
85 return 0;
86 }
87 }
77 88
78 return 0xffff; 89 return -ENODEV;
79} 90}
80 91
81/** 92/**
82 * diag_get_dev_info - retrieve device information via DIAG X'210' 93 * diag210_get_dev_info - retrieve device information via diag 0x210
83 * @devno: device number 94 * @cdev: ccw device
84 * @ps: pointer to sense ID data area
85 * 95 *
86 * Returns zero on success, non-zero otherwise. 96 * Returns zero on success, non-zero otherwise.
87 */ 97 */
88static int diag_get_dev_info(u16 devno, struct senseid *ps) 98static int diag210_get_dev_info(struct ccw_device *cdev)
89{ 99{
100 struct ccw_dev_id *dev_id = &cdev->private->dev_id;
101 struct senseid *senseid = &cdev->private->senseid;
90 struct diag210 diag_data; 102 struct diag210 diag_data;
91 int ccode; 103 int rc;
92 104
93 CIO_TRACE_EVENT (4, "VMvdinf"); 105 if (dev_id->ssid != 0)
94 106 return -ENODEV;
95 diag_data = (struct diag210) { 107 memset(&diag_data, 0, sizeof(diag_data));
96 .vrdcdvno = devno, 108 diag_data.vrdcdvno = dev_id->devno;
97 .vrdclen = sizeof (diag_data), 109 diag_data.vrdclen = sizeof(diag_data);
98 }; 110 rc = diag210(&diag_data);
99 111 CIO_TRACE_EVENT(4, "diag210");
100 ccode = diag210 (&diag_data); 112 CIO_HEX_EVENT(4, &rc, sizeof(rc));
101 if ((ccode == 0) || (ccode == 2)) { 113 CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
102 ps->reserved = 0xff; 114 if (rc != 0 && rc != 2)
103 115 goto err_failed;
104 /* Special case for osa devices. */ 116 if (diag210_to_senseid(senseid, &diag_data))
105 if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) { 117 goto err_unknown;
106 ps->cu_type = 0x3088; 118 return 0;
107 ps->cu_model = 0x60; 119
108 return 0; 120err_unknown:
109 } 121 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
110 ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla, 122 dev_id->ssid, dev_id->devno);
111 diag_data.vrdcvtyp); 123 return -ENODEV;
112 if (ps->cu_type != 0xffff) 124err_failed:
113 return 0; 125 CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
114 } 126 dev_id->ssid, dev_id->devno, rc);
115
116 CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
117 "vdev class : %02X, vdev type : %04X \n ... "
118 "rdev class : %02X, rdev type : %04X, "
119 "rdev model: %02X\n",
120 devno, ccode,
121 diag_data.vrdcvcla, diag_data.vrdcvtyp,
122 diag_data.vrdcrccl, diag_data.vrdccrty,
123 diag_data.vrdccrmd);
124
125 return -ENODEV; 127 return -ENODEV;
126} 128}
127 129
128/* 130/*
129 * Start Sense ID helper function. 131 * Initialize SENSE ID data.
130 * Try to obtain the 'control unit'/'device type' information
131 * associated with the subchannel.
132 */ 132 */
133static int 133static void snsid_init(struct ccw_device *cdev)
134__ccw_device_sense_id_start(struct ccw_device *cdev)
135{
136 struct subchannel *sch;
137 struct ccw1 *ccw;
138 int ret;
139
140 sch = to_subchannel(cdev->dev.parent);
141 /* Setup sense channel program. */
142 ccw = cdev->private->iccws;
143 ccw->cmd_code = CCW_CMD_SENSE_ID;
144 ccw->cda = (__u32) __pa (&cdev->private->senseid);
145 ccw->count = sizeof (struct senseid);
146 ccw->flags = CCW_FLAG_SLI;
147
148 /* Reset device status. */
149 memset(&cdev->private->irb, 0, sizeof(struct irb));
150
151 /* Try on every path. */
152 ret = -ENODEV;
153 while (cdev->private->imask != 0) {
154 cdev->private->senseid.cu_type = 0xFFFF;
155 if ((sch->opm & cdev->private->imask) != 0 &&
156 cdev->private->iretry > 0) {
157 cdev->private->iretry--;
158 /* Reset internal retry indication. */
159 cdev->private->flags.intretry = 0;
160 ret = cio_start (sch, cdev->private->iccws,
161 cdev->private->imask);
162 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
163 if (ret != -EACCES)
164 return ret;
165 }
166 cdev->private->imask >>= 1;
167 cdev->private->iretry = 5;
168 }
169 return ret;
170}
171
172void
173ccw_device_sense_id_start(struct ccw_device *cdev)
174{ 134{
175 int ret; 135 cdev->private->flags.esid = 0;
176 136 memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
177 memset (&cdev->private->senseid, 0, sizeof (struct senseid)); 137 cdev->private->senseid.cu_type = 0xffff;
178 cdev->private->imask = 0x80;
179 cdev->private->iretry = 5;
180 ret = __ccw_device_sense_id_start(cdev);
181 if (ret && ret != -EBUSY)
182 ccw_device_sense_id_done(cdev, ret);
183} 138}
184 139
185/* 140/*
186 * Called from interrupt context to check if a valid answer 141 * Check for complete SENSE ID data.
187 * to Sense ID was received.
188 */ 142 */
189static int 143static int snsid_check(struct ccw_device *cdev, void *data)
190ccw_device_check_sense_id(struct ccw_device *cdev)
191{ 144{
192 struct subchannel *sch; 145 struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
193 struct irb *irb; 146 int len = sizeof(struct senseid) - scsw->count;
194 147
195 sch = to_subchannel(cdev->dev.parent); 148 /* Check for incomplete SENSE ID data. */
196 irb = &cdev->private->irb; 149 if (len < SENSE_ID_MIN_LEN)
197 150 goto out_restart;
198 /* Check the error cases. */ 151 if (cdev->private->senseid.cu_type == 0xffff)
199 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 152 goto out_restart;
200 /* Retry Sense ID if requested. */ 153 /* Check for incompatible SENSE ID data. */
201 if (cdev->private->flags.intretry) { 154 if (cdev->private->senseid.reserved != 0xff)
202 cdev->private->flags.intretry = 0;
203 return -EAGAIN;
204 }
205 return -ETIME;
206 }
207 if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) {
208 /*
209 * if the device doesn't support the SenseID
210 * command further retries wouldn't help ...
211 * NB: We don't check here for intervention required like we
212 * did before, because tape devices with no tape inserted
213 * may present this status *in conjunction with* the
214 * sense id information. So, for intervention required,
215 * we use the "whack it until it talks" strategy...
216 */
217 CIO_MSG_EVENT(0, "SenseID : device %04x on Subchannel "
218 "0.%x.%04x reports cmd reject\n",
219 cdev->private->dev_id.devno, sch->schid.ssid,
220 sch->schid.sch_no);
221 return -EOPNOTSUPP; 155 return -EOPNOTSUPP;
222 } 156 /* Check for extended-identification information. */
223 if (irb->esw.esw0.erw.cons) { 157 if (len > SENSE_ID_BASIC_LEN)
224 CIO_MSG_EVENT(2, "SenseID : UC on dev 0.%x.%04x, " 158 cdev->private->flags.esid = 1;
225 "lpum %02X, cnt %02d, sns :" 159 return 0;
226 " %02X%02X%02X%02X %02X%02X%02X%02X ...\n",
227 cdev->private->dev_id.ssid,
228 cdev->private->dev_id.devno,
229 irb->esw.esw0.sublog.lpum,
230 irb->esw.esw0.erw.scnt,
231 irb->ecw[0], irb->ecw[1],
232 irb->ecw[2], irb->ecw[3],
233 irb->ecw[4], irb->ecw[5],
234 irb->ecw[6], irb->ecw[7]);
235 return -EAGAIN;
236 }
237 if (irb->scsw.cmd.cc == 3) {
238 u8 lpm;
239 160
240 lpm = to_io_private(sch)->orb.cmd.lpm; 161out_restart:
241 if ((lpm & sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) 162 snsid_init(cdev);
242 CIO_MSG_EVENT(4, "SenseID : path %02X for device %04x "
243 "on subchannel 0.%x.%04x is "
244 "'not operational'\n", lpm,
245 cdev->private->dev_id.devno,
246 sch->schid.ssid, sch->schid.sch_no);
247 return -EACCES;
248 }
249
250 /* Did we get a proper answer ? */
251 if (irb->scsw.cmd.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
252 cdev->private->senseid.reserved == 0xFF) {
253 if (irb->scsw.cmd.count < sizeof(struct senseid) - 8)
254 cdev->private->flags.esid = 1;
255 return 0; /* Success */
256 }
257
258 /* Hmm, whatever happened, try again. */
259 CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
260 "subchannel 0.%x.%04x returns status %02X%02X\n",
261 cdev->private->dev_id.devno, sch->schid.ssid,
262 sch->schid.sch_no,
263 irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
264 return -EAGAIN; 163 return -EAGAIN;
265} 164}
266 165
267/* 166/*
268 * Got interrupt for Sense ID. 167 * Process SENSE ID request result.
269 */ 168 */
270void 169static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
271ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
272{ 170{
273 struct subchannel *sch; 171 struct ccw_dev_id *id = &cdev->private->dev_id;
274 struct irb *irb; 172 struct senseid *senseid = &cdev->private->senseid;
275 int ret; 173 int vm = 0;
276 174
277 sch = to_subchannel(cdev->dev.parent); 175 if (rc && MACHINE_IS_VM) {
278 irb = (struct irb *) __LC_IRB; 176 /* Try diag 0x210 fallback on z/VM. */
279 /* Retry sense id, if needed. */ 177 snsid_init(cdev);
280 if (irb->scsw.cmd.stctl == 178 if (diag210_get_dev_info(cdev) == 0) {
281 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 179 rc = 0;
282 if ((irb->scsw.cmd.cc == 1) || !irb->scsw.cmd.actl) { 180 vm = 1;
283 ret = __ccw_device_sense_id_start(cdev);
284 if (ret && ret != -EBUSY)
285 ccw_device_sense_id_done(cdev, ret);
286 } 181 }
287 return;
288 } 182 }
289 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 183 CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
290 return; 184 "%04x/%02x%s\n", id->ssid, id->devno, rc,
291 ret = ccw_device_check_sense_id(cdev); 185 senseid->cu_type, senseid->cu_model, senseid->dev_type,
292 memset(&cdev->private->irb, 0, sizeof(struct irb)); 186 senseid->dev_model, vm ? " (diag210)" : "");
293 switch (ret) { 187 ccw_device_sense_id_done(cdev, rc);
294 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */ 188}
295 case 0: /* Sense id succeeded. */
296 case -ETIME: /* Sense id stopped by timeout. */
297 ccw_device_sense_id_done(cdev, ret);
298 break;
299 case -EACCES: /* channel is not operational. */
300 sch->lpm &= ~cdev->private->imask;
301 cdev->private->imask >>= 1;
302 cdev->private->iretry = 5;
303 /* fall through. */
304 case -EAGAIN: /* try again. */
305 ret = __ccw_device_sense_id_start(cdev);
306 if (ret == 0 || ret == -EBUSY)
307 break;
308 /* fall through. */
309 default: /* Sense ID failed. Try asking VM. */
310 if (MACHINE_IS_VM)
311 ret = diag_get_dev_info(cdev->private->dev_id.devno,
312 &cdev->private->senseid);
313 else
314 /*
315 * If we couldn't identify the device type we
316 * consider the device "not operational".
317 */
318 ret = -ENODEV;
319 189
320 ccw_device_sense_id_done(cdev, ret); 190/**
321 break; 191 * ccw_device_sense_id_start - perform SENSE ID
322 } 192 * @cdev: ccw device
193 *
194 * Execute a SENSE ID channel program on @cdev to update its sense id
195 * information. When finished, call ccw_device_sense_id_done with a
196 * return code specifying the result.
197 */
198void ccw_device_sense_id_start(struct ccw_device *cdev)
199{
200 struct subchannel *sch = to_subchannel(cdev->dev.parent);
201 struct ccw_request *req = &cdev->private->req;
202 struct ccw1 *cp = cdev->private->iccws;
203
204 CIO_TRACE_EVENT(4, "snsid");
205 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
206 /* Data setup. */
207 snsid_init(cdev);
208 /* Channel program setup. */
209 cp->cmd_code = CCW_CMD_SENSE_ID;
210 cp->cda = (u32) (addr_t) &cdev->private->senseid;
211 cp->count = sizeof(struct senseid);
212 cp->flags = CCW_FLAG_SLI;
213 /* Request setup. */
214 memset(req, 0, sizeof(*req));
215 req->cp = cp;
216 req->timeout = SENSE_ID_TIMEOUT;
217 req->maxretries = SENSE_ID_RETRIES;
218 req->lpm = sch->schib.pmcw.pam & sch->opm;
219 req->check = snsid_check;
220 req->callback = snsid_callback;
221 ccw_request_start(cdev);
323} 222}
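
The rewritten device_id.c is the template for users of the request infrastructure added in ccwreq.c: build a channel program, fill in the public fields of cdev->private->req, hand control to ccw_request_start() and receive the final result in the callback. A condensed, hedged sketch of that pattern; my_check() and my_callback() are placeholder names with the signatures from struct ccw_request (see io_sch.h below).

    /* Sketch of an internal request, mirroring ccw_device_sense_id_start()
     * above; my_check()/my_callback() stand in for requester logic. */
    static void start_internal_request(struct ccw_device *cdev)
    {
            struct subchannel *sch = to_subchannel(cdev->dev.parent);
            struct ccw_request *req = &cdev->private->req;
            struct ccw1 *cp = cdev->private->iccws;

            /* Channel program setup. */
            cp->cmd_code = CCW_CMD_SENSE_ID;
            cp->cda = (u32) (addr_t) &cdev->private->senseid;
            cp->count = sizeof(struct senseid);
            cp->flags = CCW_FLAG_SLI;
            /* Request setup: retries and path selection are driven
             * by ccwreq.c from here on. */
            memset(req, 0, sizeof(*req));
            req->cp = cp;
            req->timeout = SENSE_ID_TIMEOUT;
            req->maxretries = SENSE_ID_RETRIES;
            req->lpm = sch->schib.pmcw.pam & sch->opm;
            req->check = my_check;          /* optional: is the data complete? */
            req->callback = my_callback;    /* reports the final rc */
            ccw_request_start(cdev);
    }
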
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 2d0efee8a290..6da84543dfe9 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -11,6 +11,7 @@
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/completion.h>
14 15
15#include <asm/ccwdev.h> 16#include <asm/ccwdev.h>
16#include <asm/idals.h> 17#include <asm/idals.h>
@@ -46,6 +47,7 @@ int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
46 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0; 47 cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
47 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0; 48 cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
48 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0; 49 cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
50 cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
49 return 0; 51 return 0;
50} 52}
51 53
@@ -74,6 +76,7 @@ int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
74 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0; 76 cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
75 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0; 77 cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
76 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0; 78 cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
79 cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
77 return 0; 80 return 0;
78} 81}
79 82
@@ -90,9 +93,34 @@ void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
90 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0; 93 cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
91 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0; 94 cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
92 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0; 95 cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
96 cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
93} 97}
94 98
95/** 99/**
100 * ccw_device_is_pathgroup - determine if paths to this device are grouped
101 * @cdev: ccw device
102 *
103 * Return non-zero if there is a path group, zero otherwise.
104 */
105int ccw_device_is_pathgroup(struct ccw_device *cdev)
106{
107 return cdev->private->flags.pgroup;
108}
109EXPORT_SYMBOL(ccw_device_is_pathgroup);
110
111/**
112 * ccw_device_is_multipath - determine if device is operating in multipath mode
113 * @cdev: ccw device
114 *
115 * Return non-zero if device is operating in multipath mode, zero otherwise.
116 */
117int ccw_device_is_multipath(struct ccw_device *cdev)
118{
119 return cdev->private->flags.mpath;
120}
121EXPORT_SYMBOL(ccw_device_is_multipath);
122
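
A driver that wants multipathing would typically combine the new CCWDEV_DO_MULTIPATH flag with the existing options (for example from its probe or set_online path) and can later query the effective state through the two new accessors. A hedged usage sketch; my_set_online() is a placeholder driver hook:

    /* Sketch: request pathgrouping plus multipathing, then check what
     * path verification actually established. */
    static int my_set_online(struct ccw_device *cdev)
    {
            int rc;

            rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
                                              CCWDEV_DO_MULTIPATH);
            if (rc)
                    return rc;
            /* ... bring the device online ... */
            if (!ccw_device_is_multipath(cdev))
                    pr_info("device runs single-path\n");
            return 0;
    }

Note that the accessors report the state actually set up during path verification, which may differ from the requested options if the device lacks support.
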
123/**
96 * ccw_device_clear() - terminate I/O request processing 124 * ccw_device_clear() - terminate I/O request processing
97 * @cdev: target ccw device 125 * @cdev: target ccw device
98 * @intparm: interruption parameter; value is only used if no I/O is 126 * @intparm: interruption parameter; value is only used if no I/O is
@@ -167,8 +195,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
167 return -EINVAL; 195 return -EINVAL;
168 if (cdev->private->state == DEV_STATE_NOT_OPER) 196 if (cdev->private->state == DEV_STATE_NOT_OPER)
169 return -ENODEV; 197 return -ENODEV;
170 if (cdev->private->state == DEV_STATE_VERIFY || 198 if (cdev->private->state == DEV_STATE_VERIFY) {
171 cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
172 /* Remember to fake irb when finished. */ 199 /* Remember to fake irb when finished. */
173 if (!cdev->private->flags.fake_irb) { 200 if (!cdev->private->flags.fake_irb) {
174 cdev->private->flags.fake_irb = 1; 201 cdev->private->flags.fake_irb = 1;
@@ -478,74 +505,65 @@ __u8 ccw_device_get_path_mask(struct ccw_device *cdev)
478 return sch->lpm; 505 return sch->lpm;
479} 506}
480 507
481/* 508struct stlck_data {
482 * Try to break the lock on a boxed device. 509 struct completion done;
483 */ 510 int rc;
484int 511};
485ccw_device_stlck(struct ccw_device *cdev)
486{
487 void *buf, *buf2;
488 unsigned long flags;
489 struct subchannel *sch;
490 int ret;
491 512
492 if (!cdev) 513void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
493 return -ENODEV; 514{
515 struct stlck_data *sdata = data;
494 516
495 if (cdev->drv && !cdev->private->options.force) 517 sdata->rc = rc;
496 return -EINVAL; 518 complete(&sdata->done);
519}
497 520
498 sch = to_subchannel(cdev->dev.parent); 521/*
499 522 * Perform unconditional reserve + release.
500 CIO_TRACE_EVENT(2, "stl lock"); 523 */
501 CIO_TRACE_EVENT(2, dev_name(&cdev->dev)); 524int ccw_device_stlck(struct ccw_device *cdev)
525{
526 struct subchannel *sch = to_subchannel(cdev->dev.parent);
527 struct stlck_data data;
528 u8 *buffer;
529 int rc;
502 530
503 buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 531 /* Check if steal lock operation is valid for this device. */
504 if (!buf) 532 if (cdev->drv) {
505 return -ENOMEM; 533 if (!cdev->private->options.force)
506 buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); 534 return -EINVAL;
507 if (!buf2) {
508 kfree(buf);
509 return -ENOMEM;
510 } 535 }
511 spin_lock_irqsave(sch->lock, flags); 536 buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
512 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); 537 if (!buffer)
513 if (ret) 538 return -ENOMEM;
514 goto out_unlock; 539 init_completion(&data.done);
515 /* 540 data.rc = -EIO;
516 * Setup ccw. We chain an unconditional reserve and a release so we 541 spin_lock_irq(sch->lock);
517 * only break the lock. 542 rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
518 */ 543 if (rc)
519 cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
520 cdev->private->iccws[0].cda = (__u32) __pa(buf);
521 cdev->private->iccws[0].count = 32;
522 cdev->private->iccws[0].flags = CCW_FLAG_CC;
523 cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
524 cdev->private->iccws[1].cda = (__u32) __pa(buf2);
525 cdev->private->iccws[1].count = 32;
526 cdev->private->iccws[1].flags = 0;
527 ret = cio_start(sch, cdev->private->iccws, 0);
528 if (ret) {
529 cio_disable_subchannel(sch); //FIXME: return code?
530 goto out_unlock; 544 goto out_unlock;
545 /* Perform operation. */
 546 cdev->private->state = DEV_STATE_STEAL_LOCK;
547 ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
548 spin_unlock_irq(sch->lock);
549 /* Wait for operation to finish. */
550 if (wait_for_completion_interruptible(&data.done)) {
551 /* Got a signal. */
552 spin_lock_irq(sch->lock);
553 ccw_request_cancel(cdev);
554 spin_unlock_irq(sch->lock);
555 wait_for_completion(&data.done);
531 } 556 }
532 cdev->private->irb.scsw.cmd.actl |= SCSW_ACTL_START_PEND; 557 rc = data.rc;
533 spin_unlock_irqrestore(sch->lock, flags); 558 /* Check results. */
534 wait_event(cdev->private->wait_q, 559 spin_lock_irq(sch->lock);
535 cdev->private->irb.scsw.cmd.actl == 0); 560 cio_disable_subchannel(sch);
536 spin_lock_irqsave(sch->lock, flags); 561 cdev->private->state = DEV_STATE_BOXED;
537 cio_disable_subchannel(sch); //FIXME: return code?
538 if ((cdev->private->irb.scsw.cmd.dstat !=
539 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
540 (cdev->private->irb.scsw.cmd.cstat != 0))
541 ret = -EIO;
542 /* Clear irb. */
543 memset(&cdev->private->irb, 0, sizeof(struct irb));
544out_unlock: 562out_unlock:
545 kfree(buf); 563 spin_unlock_irq(sch->lock);
546 kfree(buf2); 564 kfree(buffer);
547 spin_unlock_irqrestore(sch->lock, flags); 565
548 return ret; 566 return rc;
549} 567}
550 568
551void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no) 569void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
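
The new ccw_device_stlck() also shows the synchronous-wait pattern layered on top of the asynchronous request machinery: wait interruptibly, and on a signal cancel the request under the subchannel lock, then wait non-interruptibly until the completion callback has run. Reduced to its skeleton (a sketch only; stlck_data and the lock handling mirror the function above):

    /* Skeleton of the interruptible-wait-with-cancel idiom used by
     * ccw_device_stlck() above. */
    static int wait_for_stlck(struct ccw_device *cdev, struct subchannel *sch,
                              struct stlck_data *sdata)
    {
            if (wait_for_completion_interruptible(&sdata->done)) {
                    /* Signal received: revoke the request... */
                    spin_lock_irq(sch->lock);
                    ccw_request_cancel(cdev);
                    spin_unlock_irq(sch->lock);
                    /* ...and wait until the callback has run. */
                    wait_for_completion(&sdata->done);
            }
            return sdata->rc;       /* set by ccw_device_stlck_done() */
    }
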
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index fc5ca1dd52b3..aad188e43b4f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,594 +1,561 @@
1/* 1/*
2 * drivers/s390/cio/device_pgid.c 2 * CCW device PGID and path verification I/O handling.
3 * 3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 4 * Copyright IBM Corp. 2002,2009
5 * IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 *
9 * Path Group ID functions.
10 */ 8 */
11 9
12#include <linux/module.h> 10#include <linux/kernel.h>
13#include <linux/init.h> 11#include <linux/string.h>
14 12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/bitops.h>
15#include <asm/ccwdev.h> 15#include <asm/ccwdev.h>
16#include <asm/cio.h> 16#include <asm/cio.h>
17#include <asm/delay.h>
18#include <asm/lowcore.h>
19 17
20#include "cio.h" 18#include "cio.h"
21#include "cio_debug.h" 19#include "cio_debug.h"
22#include "css.h"
23#include "device.h" 20#include "device.h"
24#include "ioasm.h"
25#include "io_sch.h" 21#include "io_sch.h"
26 22
23#define PGID_RETRIES 256
24#define PGID_TIMEOUT (10 * HZ)
25
27/* 26/*
28 * Helper function called from interrupt context to decide whether an 27 * Process path verification data and report result.
29 * operation should be tried again.
30 */ 28 */
31static int __ccw_device_should_retry(union scsw *scsw) 29static void verify_done(struct ccw_device *cdev, int rc)
32{ 30{
33 /* CC is only valid if start function bit is set. */ 31 struct subchannel *sch = to_subchannel(cdev->dev.parent);
34 if ((scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && scsw->cmd.cc == 1) 32 struct ccw_dev_id *id = &cdev->private->dev_id;
35 return 1; 33 int mpath = cdev->private->flags.mpath;
36 /* No more activity. For sense and set PGID we stubbornly try again. */ 34 int pgroup = cdev->private->flags.pgroup;
37 if (!scsw->cmd.actl) 35
38 return 1; 36 if (rc)
39 return 0; 37 goto out;
38 /* Ensure consistent multipathing state at device and channel. */
39 if (sch->config.mp != mpath) {
40 sch->config.mp = mpath;
41 rc = cio_commit_config(sch);
42 }
43out:
44 CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
45 "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
46 sch->vpm);
47 ccw_device_verify_done(cdev, rc);
40} 48}
41 49
42/* 50/*
43 * Start Sense Path Group ID helper function. Used in ccw_device_recog 51 * Create channel program to perform a NOOP.
44 * and ccw_device_sense_pgid.
45 */ 52 */
46static int 53static void nop_build_cp(struct ccw_device *cdev)
47__ccw_device_sense_pgid_start(struct ccw_device *cdev)
48{ 54{
49 struct subchannel *sch; 55 struct ccw_request *req = &cdev->private->req;
50 struct ccw1 *ccw; 56 struct ccw1 *cp = cdev->private->iccws;
51 int ret; 57
52 int i; 58 cp->cmd_code = CCW_CMD_NOOP;
53 59 cp->cda = 0;
54 sch = to_subchannel(cdev->dev.parent); 60 cp->count = 0;
55 /* Return if we already checked on all paths. */ 61 cp->flags = CCW_FLAG_SLI;
56 if (cdev->private->imask == 0) 62 req->cp = cp;
57 return (sch->lpm == 0) ? -ENODEV : -EACCES;
58 i = 8 - ffs(cdev->private->imask);
59
60 /* Setup sense path group id channel program. */
61 ccw = cdev->private->iccws;
62 ccw->cmd_code = CCW_CMD_SENSE_PGID;
63 ccw->count = sizeof (struct pgid);
64 ccw->flags = CCW_FLAG_SLI;
65
66 /* Reset device status. */
67 memset(&cdev->private->irb, 0, sizeof(struct irb));
68 /* Try on every path. */
69 ret = -ENODEV;
70 while (cdev->private->imask != 0) {
71 /* Try every path multiple times. */
72 ccw->cda = (__u32) __pa (&cdev->private->pgid[i]);
73 if (cdev->private->iretry > 0) {
74 cdev->private->iretry--;
75 /* Reset internal retry indication. */
76 cdev->private->flags.intretry = 0;
77 ret = cio_start (sch, cdev->private->iccws,
78 cdev->private->imask);
79 /* ret is 0, -EBUSY, -EACCES or -ENODEV */
80 if (ret != -EACCES)
81 return ret;
82 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel "
83 "0.%x.%04x, lpm %02X, became 'not "
84 "operational'\n",
85 cdev->private->dev_id.devno,
86 sch->schid.ssid,
87 sch->schid.sch_no, cdev->private->imask);
88
89 }
90 cdev->private->imask >>= 1;
91 cdev->private->iretry = 5;
92 i++;
93 }
94
95 return ret;
96} 63}
97 64
98void 65/*
99ccw_device_sense_pgid_start(struct ccw_device *cdev) 66 * Perform NOOP on a single path.
67 */
68static void nop_do(struct ccw_device *cdev)
100{ 69{
101 int ret; 70 struct subchannel *sch = to_subchannel(cdev->dev.parent);
102 71 struct ccw_request *req = &cdev->private->req;
103 /* Set a timeout of 60s */ 72
104 ccw_device_set_timeout(cdev, 60*HZ); 73 /* Adjust lpm. */
105 74 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm);
106 cdev->private->state = DEV_STATE_SENSE_PGID; 75 if (!req->lpm)
107 cdev->private->imask = 0x80; 76 goto out_nopath;
108 cdev->private->iretry = 5; 77 nop_build_cp(cdev);
109 memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid)); 78 ccw_request_start(cdev);
110 ret = __ccw_device_sense_pgid_start(cdev); 79 return;
111 if (ret && ret != -EBUSY) 80
112 ccw_device_sense_pgid_done(cdev, ret); 81out_nopath:
82 verify_done(cdev, sch->vpm ? 0 : -EACCES);
113} 83}
114 84
115/* 85/*
116 * Called from interrupt context to check if a valid answer 86 * Adjust NOOP I/O status.
117 * to Sense Path Group ID was received.
118 */ 87 */
119static int 88static enum io_status nop_filter(struct ccw_device *cdev, void *data,
120__ccw_device_check_sense_pgid(struct ccw_device *cdev) 89 struct irb *irb, enum io_status status)
121{ 90{
122 struct subchannel *sch; 91 /* Only subchannel status might indicate a path error. */
123 struct irb *irb; 92 if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
124 int i; 93 return IO_DONE;
125 94 return status;
126 sch = to_subchannel(cdev->dev.parent);
127 irb = &cdev->private->irb;
128 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
129 /* Retry Sense PGID if requested. */
130 if (cdev->private->flags.intretry) {
131 cdev->private->flags.intretry = 0;
132 return -EAGAIN;
133 }
134 return -ETIME;
135 }
136 if (irb->esw.esw0.erw.cons &&
137 (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) {
138 /*
139 * If the device doesn't support the Sense Path Group ID
140 * command further retries wouldn't help ...
141 */
142 return -EOPNOTSUPP;
143 }
144 if (irb->esw.esw0.erw.cons) {
145 CIO_MSG_EVENT(2, "SNID - device 0.%x.%04x, unit check, "
146 "lpum %02X, cnt %02d, sns : "
147 "%02X%02X%02X%02X %02X%02X%02X%02X ...\n",
148 cdev->private->dev_id.ssid,
149 cdev->private->dev_id.devno,
150 irb->esw.esw0.sublog.lpum,
151 irb->esw.esw0.erw.scnt,
152 irb->ecw[0], irb->ecw[1],
153 irb->ecw[2], irb->ecw[3],
154 irb->ecw[4], irb->ecw[5],
155 irb->ecw[6], irb->ecw[7]);
156 return -EAGAIN;
157 }
158 if (irb->scsw.cmd.cc == 3) {
159 u8 lpm;
160
161 lpm = to_io_private(sch)->orb.cmd.lpm;
162 CIO_MSG_EVENT(3, "SNID - Device %04x on Subchannel 0.%x.%04x,"
163 " lpm %02X, became 'not operational'\n",
164 cdev->private->dev_id.devno, sch->schid.ssid,
165 sch->schid.sch_no, lpm);
166 return -EACCES;
167 }
168 i = 8 - ffs(cdev->private->imask);
169 if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) {
170 CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x "
171 "is reserved by someone else\n",
172 cdev->private->dev_id.devno, sch->schid.ssid,
173 sch->schid.sch_no);
174 return -EUSERS;
175 }
176 return 0;
177} 95}
178 96
179/* 97/*
180 * Got interrupt for Sense Path Group ID. 98 * Process NOOP request result for a single path.
181 */ 99 */
182void 100static void nop_callback(struct ccw_device *cdev, void *data, int rc)
183ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
184{ 101{
185 struct subchannel *sch; 102 struct subchannel *sch = to_subchannel(cdev->dev.parent);
186 struct irb *irb; 103 struct ccw_request *req = &cdev->private->req;
187 int ret; 104
188 105 if (rc == 0)
189 irb = (struct irb *) __LC_IRB; 106 sch->vpm |= req->lpm;
190 107 else if (rc != -EACCES)
191 if (irb->scsw.cmd.stctl == 108 goto err;
192 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 109 req->lpm >>= 1;
193 if (__ccw_device_should_retry(&irb->scsw)) { 110 nop_do(cdev);
194 ret = __ccw_device_sense_pgid_start(cdev); 111 return;
195 if (ret && ret != -EBUSY) 112
196 ccw_device_sense_pgid_done(cdev, ret); 113err:
197 } 114 verify_done(cdev, rc);
198 return;
199 }
200 if (ccw_device_accumulate_and_sense(cdev, irb) != 0)
201 return;
202 sch = to_subchannel(cdev->dev.parent);
203 ret = __ccw_device_check_sense_pgid(cdev);
204 memset(&cdev->private->irb, 0, sizeof(struct irb));
205 switch (ret) {
206 /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */
207 case -EOPNOTSUPP: /* Sense Path Group ID not supported */
208 ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP);
209 break;
210 case -ETIME: /* Sense path group id stopped by timeout. */
211 ccw_device_sense_pgid_done(cdev, -ETIME);
212 break;
213 case -EACCES: /* channel is not operational. */
214 sch->lpm &= ~cdev->private->imask;
215 /* Fall through. */
216 case 0: /* Sense Path Group ID successful. */
217 cdev->private->imask >>= 1;
218 cdev->private->iretry = 5;
219 /* Fall through. */
220 case -EAGAIN: /* Try again. */
221 ret = __ccw_device_sense_pgid_start(cdev);
222 if (ret != 0 && ret != -EBUSY)
223 ccw_device_sense_pgid_done(cdev, ret);
224 break;
225 case -EUSERS: /* device is reserved for someone else. */
226 ccw_device_sense_pgid_done(cdev, -EUSERS);
227 break;
228 }
229} 115}
230 116
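
All of the rewritten request types walk the eight possible channel paths with the same mask idiom: start at 0x80, let lpm_adjust() skip to the next usable path, run the I/O, then shift right in the callback and repeat until the mask is empty. Flattened into a synchronous loop for readability (a sketch only; do_one_path() is hypothetical and the real code chains through callbacks instead):

    /* Synchronous rendering of the asynchronous walk done by the
     * nop_do()/nop_callback() pair above. */
    static void walk_paths(struct ccw_device *cdev, u8 pam)
    {
            int lpm = 0x80;

            while ((lpm = lpm_adjust(lpm, pam)) != 0) {
                    do_one_path(cdev, lpm);   /* one I/O per usable path */
                    lpm >>= 1;                /* then try the next path */
            }
    }
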
231/* 117/*
232 * Path Group ID helper function. 118 * Create channel program to perform SET PGID on a single path.
233 */ 119 */
234static int 120static void spid_build_cp(struct ccw_device *cdev, u8 fn)
235__ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
236{ 121{
237 struct subchannel *sch; 122 struct ccw_request *req = &cdev->private->req;
238 struct ccw1 *ccw; 123 struct ccw1 *cp = cdev->private->iccws;
239 int ret; 124 int i = 8 - ffs(req->lpm);
240 125 struct pgid *pgid = &cdev->private->pgid[i];
241 sch = to_subchannel(cdev->dev.parent); 126
242 127 pgid->inf.fc = fn;
243 /* Setup sense path group id channel program. */ 128 cp->cmd_code = CCW_CMD_SET_PGID;
244 cdev->private->pgid[0].inf.fc = func; 129 cp->cda = (u32) (addr_t) pgid;
245 ccw = cdev->private->iccws; 130 cp->count = sizeof(*pgid);
246 if (cdev->private->flags.pgid_single) 131 cp->flags = CCW_FLAG_SLI;
247 cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH; 132 req->cp = cp;
248 else
249 cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
250 ccw->cmd_code = CCW_CMD_SET_PGID;
251 ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
252 ccw->count = sizeof (struct pgid);
253 ccw->flags = CCW_FLAG_SLI;
254
255 /* Reset device status. */
256 memset(&cdev->private->irb, 0, sizeof(struct irb));
257
258 /* Try multiple times. */
259 ret = -EACCES;
260 if (cdev->private->iretry > 0) {
261 cdev->private->iretry--;
262 /* Reset internal retry indication. */
263 cdev->private->flags.intretry = 0;
264 ret = cio_start (sch, cdev->private->iccws,
265 cdev->private->imask);
266 /* We expect an interrupt in case of success or busy
267 * indication. */
268 if ((ret == 0) || (ret == -EBUSY))
269 return ret;
270 }
271 /* PGID command failed on this path. */
272 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel "
273 "0.%x.%04x, lpm %02X, became 'not operational'\n",
274 cdev->private->dev_id.devno, sch->schid.ssid,
275 sch->schid.sch_no, cdev->private->imask);
276 return ret;
277} 133}
278 134
279/* 135/*
280 * Helper function to send a nop ccw down a path. 136 * Perform establish/resign SET PGID on a single path.
281 */ 137 */
282static int __ccw_device_do_nop(struct ccw_device *cdev) 138static void spid_do(struct ccw_device *cdev)
283{ 139{
284 struct subchannel *sch; 140 struct subchannel *sch = to_subchannel(cdev->dev.parent);
285 struct ccw1 *ccw; 141 struct ccw_request *req = &cdev->private->req;
286 int ret; 142 u8 fn;
287 143
288 sch = to_subchannel(cdev->dev.parent); 144 /* Use next available path that is not already in correct state. */
289 145 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & ~sch->vpm);
290 /* Setup nop channel program. */ 146 if (!req->lpm)
291 ccw = cdev->private->iccws; 147 goto out_nopath;
292 ccw->cmd_code = CCW_CMD_NOOP; 148 /* Channel program setup. */
293 ccw->cda = 0; 149 if (req->lpm & sch->opm)
294 ccw->count = 0; 150 fn = SPID_FUNC_ESTABLISH;
295 ccw->flags = CCW_FLAG_SLI; 151 else
296 152 fn = SPID_FUNC_RESIGN;
297 /* Reset device status. */ 153 if (cdev->private->flags.mpath)
298 memset(&cdev->private->irb, 0, sizeof(struct irb)); 154 fn |= SPID_FUNC_MULTI_PATH;
299 155 spid_build_cp(cdev, fn);
300 /* Try multiple times. */ 156 ccw_request_start(cdev);
301 ret = -EACCES; 157 return;
302 if (cdev->private->iretry > 0) { 158
303 cdev->private->iretry--; 159out_nopath:
304 /* Reset internal retry indication. */ 160 verify_done(cdev, sch->vpm ? 0 : -EACCES);
305 cdev->private->flags.intretry = 0;
306 ret = cio_start (sch, cdev->private->iccws,
307 cdev->private->imask);
308 /* We expect an interrupt in case of success or busy
309 * indication. */
310 if ((ret == 0) || (ret == -EBUSY))
311 return ret;
312 }
313 /* nop command failed on this path. */
314 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel "
315 "0.%x.%04x, lpm %02X, became 'not operational'\n",
316 cdev->private->dev_id.devno, sch->schid.ssid,
317 sch->schid.sch_no, cdev->private->imask);
318 return ret;
319} 161}
320 162
163static void verify_start(struct ccw_device *cdev);
321 164
322/* 165/*
323 * Called from interrupt context to check if a valid answer 166 * Process SET PGID request result for a single path.
324 * to Set Path Group ID was received.
325 */ 167 */
326static int 168static void spid_callback(struct ccw_device *cdev, void *data, int rc)
327__ccw_device_check_pgid(struct ccw_device *cdev)
328{ 169{
329 struct subchannel *sch; 170 struct subchannel *sch = to_subchannel(cdev->dev.parent);
330 struct irb *irb; 171 struct ccw_request *req = &cdev->private->req;
331 172
332 sch = to_subchannel(cdev->dev.parent); 173 switch (rc) {
333 irb = &cdev->private->irb; 174 case 0:
334 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 175 sch->vpm |= req->lpm & sch->opm;
335 /* Retry Set PGID if requested. */ 176 break;
336 if (cdev->private->flags.intretry) { 177 case -EACCES:
337 cdev->private->flags.intretry = 0; 178 break;
338 return -EAGAIN; 179 case -EOPNOTSUPP:
180 if (cdev->private->flags.mpath) {
181 /* Try without multipathing. */
182 cdev->private->flags.mpath = 0;
183 goto out_restart;
339 } 184 }
340 return -ETIME; 185 /* Try without pathgrouping. */
186 cdev->private->flags.pgroup = 0;
187 goto out_restart;
188 default:
189 goto err;
341 } 190 }
342 if (irb->esw.esw0.erw.cons) { 191 req->lpm >>= 1;
343 if (irb->ecw[0] & SNS0_CMD_REJECT) 192 spid_do(cdev);
344 return -EOPNOTSUPP; 193 return;
345 /* Hmm, whatever happened, try again. */ 194
346 CIO_MSG_EVENT(2, "SPID - device 0.%x.%04x, unit check, " 195out_restart:
347 "cnt %02d, " 196 verify_start(cdev);
348 "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", 197 return;
349 cdev->private->dev_id.ssid, 198err:
350 cdev->private->dev_id.devno, 199 verify_done(cdev, rc);
351 irb->esw.esw0.erw.scnt, 200}
352 irb->ecw[0], irb->ecw[1], 201
353 irb->ecw[2], irb->ecw[3], 202static void spid_start(struct ccw_device *cdev)
354 irb->ecw[4], irb->ecw[5], 203{
355 irb->ecw[6], irb->ecw[7]); 204 struct ccw_request *req = &cdev->private->req;
356 return -EAGAIN; 205
357 } 206 /* Initialize request data. */
358 if (irb->scsw.cmd.cc == 3) { 207 memset(req, 0, sizeof(*req));
359 CIO_MSG_EVENT(3, "SPID - Device %04x on Subchannel 0.%x.%04x," 208 req->timeout = PGID_TIMEOUT;
360 " lpm %02X, became 'not operational'\n", 209 req->maxretries = PGID_RETRIES;
361 cdev->private->dev_id.devno, sch->schid.ssid, 210 req->lpm = 0x80;
362 sch->schid.sch_no, cdev->private->imask); 211 req->callback = spid_callback;
363 return -EACCES; 212 spid_do(cdev);
364 } 213}
365 return 0; 214
215static int pgid_cmp(struct pgid *p1, struct pgid *p2)
216{
217 return memcmp((char *) p1 + 1, (char *) p2 + 1,
218 sizeof(struct pgid) - 1);
366} 219}
367 220
368/* 221/*
369 * Called from interrupt context to check the path status after a nop has 222 * Determine pathgroup state from PGID data.
370 * been send.
371 */ 223 */
372static int __ccw_device_check_nop(struct ccw_device *cdev) 224static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
225 int *mismatch, int *reserved, int *reset)
373{ 226{
374 struct subchannel *sch; 227 struct pgid *pgid = &cdev->private->pgid[0];
375 struct irb *irb; 228 struct pgid *first = NULL;
376 229 int lpm;
377 sch = to_subchannel(cdev->dev.parent); 230 int i;
378 irb = &cdev->private->irb; 231
379 if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 232 *mismatch = 0;
380 /* Retry NOP if requested. */ 233 *reserved = 0;
381 if (cdev->private->flags.intretry) { 234 *reset = 0;
382 cdev->private->flags.intretry = 0; 235 for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
383 return -EAGAIN; 236 if ((cdev->private->pgid_valid_mask & lpm) == 0)
237 continue;
238 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
239 *reserved = 1;
240 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) {
241 /* A PGID was reset. */
242 *reset = 1;
243 continue;
384 } 244 }
385 return -ETIME; 245 if (!first) {
386 } 246 first = pgid;
387 if (irb->scsw.cmd.cc == 3) { 247 continue;
388 CIO_MSG_EVENT(3, "NOP - Device %04x on Subchannel 0.%x.%04x," 248 }
389 " lpm %02X, became 'not operational'\n", 249 if (pgid_cmp(pgid, first) != 0)
390 cdev->private->dev_id.devno, sch->schid.ssid, 250 *mismatch = 1;
391 sch->schid.sch_no, cdev->private->imask);
392 return -EACCES;
393 } 251 }
394 return 0; 252 if (!first)
253 first = &channel_subsystems[0]->global_pgid;
254 *p = first;
395} 255}
396 256
397static void 257static u8 pgid_to_vpm(struct ccw_device *cdev)
398__ccw_device_verify_start(struct ccw_device *cdev)
399{ 258{
400 struct subchannel *sch; 259 struct subchannel *sch = to_subchannel(cdev->dev.parent);
401 __u8 func; 260 struct pgid *pgid;
402 int ret; 261 int i;
403 262 int lpm;
404 sch = to_subchannel(cdev->dev.parent); 263 u8 vpm = 0;
405 /* Repeat for all paths. */ 264
406 for (; cdev->private->imask; cdev->private->imask >>= 1, 265 /* Set VPM bits for paths which are already in the target state. */
407 cdev->private->iretry = 5) { 266 for (i = 0; i < 8; i++) {
408 if ((cdev->private->imask & sch->schib.pmcw.pam) == 0) 267 lpm = 0x80 >> i;
409 /* Path not available, try next. */ 268 if ((cdev->private->pgid_valid_mask & lpm) == 0)
410 continue; 269 continue;
411 if (cdev->private->options.pgroup) { 270 pgid = &cdev->private->pgid[i];
412 if (sch->opm & cdev->private->imask) 271 if (sch->opm & lpm) {
413 func = SPID_FUNC_ESTABLISH; 272 if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
414 else 273 continue;
415 func = SPID_FUNC_RESIGN; 274 } else {
416 ret = __ccw_device_do_pgid(cdev, func); 275 if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
417 } else 276 continue;
418 ret = __ccw_device_do_nop(cdev); 277 }
419 /* We expect an interrupt in case of success or busy 278 if (cdev->private->flags.mpath) {
420 * indication. */ 279 if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
421 if (ret == 0 || ret == -EBUSY) 280 continue;
422 return; 281 } else {
423 /* Permanent path failure, try next. */ 282 if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
283 continue;
284 }
285 vpm |= lpm;
424 } 286 }
425 /* Done with all paths. */ 287
426 ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV); 288 return vpm;
427} 289}
428 290
429/* 291static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
430 * Got interrupt for Set Path Group ID.
431 */
432void
433ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
434{ 292{
435 struct subchannel *sch; 293 int i;
436 struct irb *irb;
437 int ret;
438 294
439 irb = (struct irb *) __LC_IRB; 295 for (i = 0; i < 8; i++)
296 memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
297}
440 298
441 if (irb->scsw.cmd.stctl == 299/*
442 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 300 * Process SENSE PGID data and report result.
443 if (__ccw_device_should_retry(&irb->scsw)) 301 */
444 __ccw_device_verify_start(cdev); 302static void snid_done(struct ccw_device *cdev, int rc)
445 return; 303{
304 struct ccw_dev_id *id = &cdev->private->dev_id;
305 struct subchannel *sch = to_subchannel(cdev->dev.parent);
306 struct pgid *pgid;
307 int mismatch = 0;
308 int reserved = 0;
309 int reset = 0;
310
311 if (rc)
312 goto out;
313 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
314 if (reserved)
315 rc = -EUSERS;
316 else if (mismatch)
317 rc = -EOPNOTSUPP;
318 else {
319 sch->vpm = pgid_to_vpm(cdev);
320 pgid_fill(cdev, pgid);
446 } 321 }
447 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 322out:
448 return; 323 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
449 sch = to_subchannel(cdev->dev.parent); 324 "mism=%d rsvd=%d reset=%d\n", id->ssid, id->devno, rc,
450 if (cdev->private->options.pgroup) 325 cdev->private->pgid_valid_mask, sch->vpm, mismatch,
451 ret = __ccw_device_check_pgid(cdev); 326 reserved, reset);
452 else 327 switch (rc) {
453 ret = __ccw_device_check_nop(cdev);
454 memset(&cdev->private->irb, 0, sizeof(struct irb));
455
456 switch (ret) {
457 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
458 case 0: 328 case 0:
459 /* Path verification ccw finished successfully, update lpm. */ 329 /* Anything left to do? */
460 sch->vpm |= sch->opm & cdev->private->imask; 330 if (sch->vpm == sch->schib.pmcw.pam) {
461 /* Go on with next path. */ 331 verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
462 cdev->private->imask >>= 1; 332 return;
463 cdev->private->iretry = 5; 333 }
464 __ccw_device_verify_start(cdev); 334 /* Perform path-grouping. */
335 spid_start(cdev);
465 break; 336 break;
466 case -EOPNOTSUPP: 337 case -EOPNOTSUPP:
467 /* 338 /* Path-grouping not supported. */
468 * One of those strange devices which claim to be able 339 cdev->private->flags.pgroup = 0;
469 * to do multipathing but not for Set Path Group ID. 340 cdev->private->flags.mpath = 0;
470 */ 341 verify_start(cdev);
471 if (cdev->private->flags.pgid_single)
472 cdev->private->options.pgroup = 0;
473 else
474 cdev->private->flags.pgid_single = 1;
475 /* Retry */
476 sch->vpm = 0;
477 cdev->private->imask = 0x80;
478 cdev->private->iretry = 5;
479 /* fall through. */
480 case -EAGAIN: /* Try again. */
481 __ccw_device_verify_start(cdev);
482 break;
483 case -ETIME: /* Set path group id stopped by timeout. */
484 ccw_device_verify_done(cdev, -ETIME);
485 break;
486 case -EACCES: /* channel is not operational. */
487 cdev->private->imask >>= 1;
488 cdev->private->iretry = 5;
489 __ccw_device_verify_start(cdev);
490 break; 342 break;
343 default:
344 verify_done(cdev, rc);
491 } 345 }
492} 346}
493 347
494void 348/*
495ccw_device_verify_start(struct ccw_device *cdev) 349 * Create channel program to perform a SENSE PGID on a single path.
350 */
351static void snid_build_cp(struct ccw_device *cdev)
352{
353 struct ccw_request *req = &cdev->private->req;
354 struct ccw1 *cp = cdev->private->iccws;
355 int i = 8 - ffs(req->lpm);
356
357 /* Channel program setup. */
358 cp->cmd_code = CCW_CMD_SENSE_PGID;
359 cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
360 cp->count = sizeof(struct pgid);
361 cp->flags = CCW_FLAG_SLI;
362 req->cp = cp;
363}
364
365/*
366 * Perform SENSE PGID on a single path.
367 */
368static void snid_do(struct ccw_device *cdev)
496{ 369{
497 struct subchannel *sch = to_subchannel(cdev->dev.parent); 370 struct subchannel *sch = to_subchannel(cdev->dev.parent);
371 struct ccw_request *req = &cdev->private->req;
372
373 /* Adjust lpm if paths are not set in pam. */
374 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam);
375 if (!req->lpm)
376 goto out_nopath;
377 snid_build_cp(cdev);
378 ccw_request_start(cdev);
379 return;
380
381out_nopath:
382 snid_done(cdev, cdev->private->pgid_valid_mask ? 0 : -EACCES);
383}
498 384
499 cdev->private->flags.pgid_single = 0; 385/*
500 cdev->private->imask = 0x80; 386 * Process SENSE PGID request result for a single path.
501 cdev->private->iretry = 5; 387 */
388static void snid_callback(struct ccw_device *cdev, void *data, int rc)
389{
390 struct ccw_request *req = &cdev->private->req;
391
392 if (rc == 0)
393 cdev->private->pgid_valid_mask |= req->lpm;
394 else if (rc != -EACCES)
395 goto err;
396 req->lpm >>= 1;
397 snid_do(cdev);
398 return;
399
400err:
401 snid_done(cdev, rc);
402}
502 403
503 /* Start with empty vpm. */ 404/*
504 sch->vpm = 0; 405 * Perform path verification.
406 */
407static void verify_start(struct ccw_device *cdev)
408{
409 struct subchannel *sch = to_subchannel(cdev->dev.parent);
410 struct ccw_request *req = &cdev->private->req;
411 struct ccw_dev_id *devid = &cdev->private->dev_id;
505 412
506 /* Get current pam. */ 413 sch->vpm = 0;
507 if (cio_update_schib(sch)) { 414 /* Initialize request data. */
508 ccw_device_verify_done(cdev, -ENODEV); 415 memset(req, 0, sizeof(*req));
509 return; 416 req->timeout = PGID_TIMEOUT;
417 req->maxretries = PGID_RETRIES;
418 req->lpm = 0x80;
419 if (cdev->private->flags.pgroup) {
420 CIO_TRACE_EVENT(4, "snid");
421 CIO_HEX_EVENT(4, devid, sizeof(*devid));
422 req->callback = snid_callback;
423 snid_do(cdev);
424 } else {
425 CIO_TRACE_EVENT(4, "nop");
426 CIO_HEX_EVENT(4, devid, sizeof(*devid));
427 req->filter = nop_filter;
428 req->callback = nop_callback;
429 nop_do(cdev);
510 } 430 }
511 /* After 60s path verification is considered to have failed. */
512 ccw_device_set_timeout(cdev, 60*HZ);
513 __ccw_device_verify_start(cdev);
514} 431}
515 432
516static void 433/**
517__ccw_device_disband_start(struct ccw_device *cdev) 434 * ccw_device_verify_start - perform path verification
435 * @cdev: ccw device
436 *
437 * Perform an I/O on each available channel path to @cdev to determine which
438 * paths are operational. The resulting path mask is stored in sch->vpm.
439 * If device options specify pathgrouping, establish a pathgroup for the
440 * operational paths. When finished, call ccw_device_verify_done with a
441 * return code specifying the result.
442 */
443void ccw_device_verify_start(struct ccw_device *cdev)
518{ 444{
519 struct subchannel *sch; 445 CIO_TRACE_EVENT(4, "vrfy");
520 int ret; 446 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
521 447 /* Initialize PGID data. */
522 sch = to_subchannel(cdev->dev.parent); 448 memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
523 while (cdev->private->imask != 0) { 449 cdev->private->pgid_valid_mask = 0;
524 if (sch->lpm & cdev->private->imask) { 450 /*
525 ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND); 451 * Initialize pathgroup and multipath state with target values.
526 if (ret == 0) 452 * They may change in the course of path verification.
527 return; 453 */
528 } 454 cdev->private->flags.pgroup = cdev->private->options.pgroup;
529 cdev->private->iretry = 5; 455 cdev->private->flags.mpath = cdev->private->options.mpath;
530 cdev->private->imask >>= 1; 456 cdev->private->flags.doverify = 0;
531 } 457 verify_start(cdev);
532 ccw_device_disband_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
533} 458}
534 459
535/* 460/*
536 * Got interrupt for Unset Path Group ID. 461 * Process disband SET PGID request result.
537 */ 462 */
538void 463static void disband_callback(struct ccw_device *cdev, void *data, int rc)
539ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
540{ 464{
541 struct subchannel *sch; 465 struct subchannel *sch = to_subchannel(cdev->dev.parent);
542 struct irb *irb; 466 struct ccw_dev_id *id = &cdev->private->dev_id;
543 int ret; 467
468 if (rc)
469 goto out;
470 /* Ensure consistent multipathing state at device and channel. */
471 cdev->private->flags.mpath = 0;
472 if (sch->config.mp) {
473 sch->config.mp = 0;
474 rc = cio_commit_config(sch);
475 }
476out:
477 CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
478 rc);
479 ccw_device_disband_done(cdev, rc);
480}
544 481
545 irb = (struct irb *) __LC_IRB; 482/**
483 * ccw_device_disband_start - disband pathgroup
484 * @cdev: ccw device
485 *
486 * Execute a SET PGID channel program on @cdev to disband a previously
487 * established pathgroup. When finished, call ccw_device_disband_done with
488 * a return code specifying the result.
489 */
490void ccw_device_disband_start(struct ccw_device *cdev)
491{
492 struct subchannel *sch = to_subchannel(cdev->dev.parent);
493 struct ccw_request *req = &cdev->private->req;
494 u8 fn;
495
496 CIO_TRACE_EVENT(4, "disb");
497 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
498 /* Request setup. */
499 memset(req, 0, sizeof(*req));
500 req->timeout = PGID_TIMEOUT;
501 req->maxretries = PGID_RETRIES;
502 req->lpm = sch->schib.pmcw.pam & sch->opm;
503 req->callback = disband_callback;
504 fn = SPID_FUNC_DISBAND;
505 if (cdev->private->flags.mpath)
506 fn |= SPID_FUNC_MULTI_PATH;
507 spid_build_cp(cdev, fn);
508 ccw_request_start(cdev);
509}
546 510
547 if (irb->scsw.cmd.stctl == 511static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
548 (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { 512{
549 if (__ccw_device_should_retry(&irb->scsw)) 513 struct ccw_request *req = &cdev->private->req;
550 __ccw_device_disband_start(cdev); 514 struct ccw1 *cp = cdev->private->iccws;
551 return; 515
552 } 516 cp[0].cmd_code = CCW_CMD_STLCK;
553 if (ccw_device_accumulate_and_sense(cdev, irb) != 0) 517 cp[0].cda = (u32) (addr_t) buf1;
554 return; 518 cp[0].count = 32;
555 sch = to_subchannel(cdev->dev.parent); 519 cp[0].flags = CCW_FLAG_CC;
556 ret = __ccw_device_check_pgid(cdev); 520 cp[1].cmd_code = CCW_CMD_RELEASE;
557 memset(&cdev->private->irb, 0, sizeof(struct irb)); 521 cp[1].cda = (u32) (addr_t) buf2;
558 switch (ret) { 522 cp[1].count = 32;
559 /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ 523 cp[1].flags = 0;
560 case 0: /* disband successful. */ 524 req->cp = cp;
561 ccw_device_disband_done(cdev, ret);
562 break;
563 case -EOPNOTSUPP:
564 /*
565 * One of those strange devices which claim to be able
566 * to do multipathing but not for Unset Path Group ID.
567 */
568 cdev->private->flags.pgid_single = 1;
569 /* fall through. */
570 case -EAGAIN: /* Try again. */
571 __ccw_device_disband_start(cdev);
572 break;
573 case -ETIME: /* Set path group id stopped by timeout. */
574 ccw_device_disband_done(cdev, -ETIME);
575 break;
576 case -EACCES: /* channel is not operational. */
577 cdev->private->imask >>= 1;
578 cdev->private->iretry = 5;
579 __ccw_device_disband_start(cdev);
580 break;
581 }
582} 525}
583 526
584void 527static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
585ccw_device_disband_start(struct ccw_device *cdev)
586{ 528{
587 /* After 60s disbanding is considered to have failed. */ 529 ccw_device_stlck_done(cdev, data, rc);
588 ccw_device_set_timeout(cdev, 60*HZ); 530}
589 531
590 cdev->private->flags.pgid_single = 0; 532/**
591 cdev->private->iretry = 5; 533 * ccw_device_stlck_start - perform unconditional release
592 cdev->private->imask = 0x80; 534 * @cdev: ccw device
593 __ccw_device_disband_start(cdev); 535 * @data: data pointer to be passed to ccw_device_stlck_done
536 * @buf1: data pointer used in channel program
537 * @buf2: data pointer used in channel program
538 *
539 * Execute a channel program on @cdev to release an existing PGID reservation.
540 * When finished, call ccw_device_stlck_done with a return code specifying the
541 * result.
542 */
543void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
544 void *buf2)
545{
546 struct subchannel *sch = to_subchannel(cdev->dev.parent);
547 struct ccw_request *req = &cdev->private->req;
548
549 CIO_TRACE_EVENT(4, "stlck");
550 CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
551 /* Request setup. */
552 memset(req, 0, sizeof(*req));
553 req->timeout = PGID_TIMEOUT;
554 req->maxretries = PGID_RETRIES;
555 req->lpm = sch->schib.pmcw.pam & sch->opm;
556 req->data = data;
557 req->callback = stlck_callback;
558 stlck_build_cp(cdev, buf1, buf2);
559 ccw_request_start(cdev);
594} 560}
561
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 5814dbee2410..66d8066ef22a 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -336,9 +336,6 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
336 sense_ccw->count = SENSE_MAX_COUNT; 336 sense_ccw->count = SENSE_MAX_COUNT;
337 sense_ccw->flags = CCW_FLAG_SLI; 337 sense_ccw->flags = CCW_FLAG_SLI;
338 338
339 /* Reset internal retry indication. */
340 cdev->private->flags.intretry = 0;
341
342 rc = cio_start(sch, sense_ccw, 0xff); 339 rc = cio_start(sch, sense_ccw, 0xff);
343 if (rc == -ENODEV || rc == -EACCES) 340 if (rc == -ENODEV || rc == -EACCES)
344 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 341 dev_fsm_event(cdev, DEV_EVENT_VERIFY);
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 0b8f381bd20e..d72ae4c93af9 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -1,7 +1,10 @@
1#ifndef S390_IO_SCH_H 1#ifndef S390_IO_SCH_H
2#define S390_IO_SCH_H 2#define S390_IO_SCH_H
3 3
4#include <linux/types.h>
4#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h>
7#include "css.h"
5 8
6/* 9/*
7 * command-mode operation request block 10 * command-mode operation request block
@@ -68,6 +71,52 @@ struct io_subchannel_private {
68#define MAX_CIWS 8 71#define MAX_CIWS 8
69 72
70/* 73/*
74 * Possible status values for a CCW request's I/O.
75 */
76enum io_status {
77 IO_DONE,
78 IO_RUNNING,
79 IO_STATUS_ERROR,
80 IO_PATH_ERROR,
81 IO_REJECTED,
82 IO_KILLED
83};
84
85/**
86 * ccw_request - Internal CCW request.
87 * @cp: channel program to start
88 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
89 * @maxretries: number of retries per I/O operation and path
90 * @lpm: mask of paths to use
91 * @check: optional callback that determines if results are final
92 * @filter: optional callback to adjust request status based on IRB data
93 * @callback: final callback
94 * @data: user-defined pointer passed to all callbacks
95 * @mask: current path mask
96 * @retries: current number of retries
97 * @drc: delayed return code
98 * @cancel: non-zero if request was cancelled
99 * @done: non-zero if request was finished
100 */
101struct ccw_request {
102 struct ccw1 *cp;
103 unsigned long timeout;
104 u16 maxretries;
105 u8 lpm;
106 int (*check)(struct ccw_device *, void *);
107 enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
108 enum io_status);
109 void (*callback)(struct ccw_device *, void *, int);
110 void *data;
111 /* These fields are used internally. */
112 u16 mask;
113 u16 retries;
114 int drc;
115 int cancel:1;
116 int done:1;
117} __attribute__((packed));
118
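
Requesters that need to reinterpret raw I/O status before the generic retry logic sees it can hook in through the optional filter callback declared above. A minimal sketch modeled on nop_filter() in device_pgid.c earlier in this patch; my_filter() is a placeholder name:

    /* Sketch of a status filter: an error without subchannel status
     * (cstat == 0) is treated as a final result rather than as a path
     * problem, as the NOOP verification code does. */
    static enum io_status my_filter(struct ccw_device *cdev, void *data,
                                    struct irb *irb, enum io_status status)
    {
            if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
                    return IO_DONE;
            return status;
    }
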
119/*
71 * sense-id response buffer layout 120 * sense-id response buffer layout
72 */ 121 */
73struct senseid { 122struct senseid {
@@ -82,32 +131,43 @@ struct senseid {
82 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */ 131 struct ciw ciw[MAX_CIWS]; /* variable # of CIWs */
83} __attribute__ ((packed, aligned(4))); 132} __attribute__ ((packed, aligned(4)));
84 133
134enum cdev_todo {
135 CDEV_TODO_NOTHING,
136 CDEV_TODO_ENABLE_CMF,
137 CDEV_TODO_REBIND,
138 CDEV_TODO_REGISTER,
139 CDEV_TODO_UNREG,
140 CDEV_TODO_UNREG_EVAL,
141};
142
85struct ccw_device_private { 143struct ccw_device_private {
86 struct ccw_device *cdev; 144 struct ccw_device *cdev;
87 struct subchannel *sch; 145 struct subchannel *sch;
88 int state; /* device state */ 146 int state; /* device state */
89 atomic_t onoff; 147 atomic_t onoff;
90 unsigned long registered;
91 struct ccw_dev_id dev_id; /* device id */ 148 struct ccw_dev_id dev_id; /* device id */
92 struct subchannel_id schid; /* subchannel number */ 149 struct subchannel_id schid; /* subchannel number */
93 u8 imask; /* lpm mask for SNID/SID/SPGID */ 150 struct ccw_request req; /* internal I/O request */
94 int iretry; /* retry counter SNID/SID/SPGID */ 151 int iretry;
152 u8 pgid_valid_mask; /* mask of valid PGIDs */
95 struct { 153 struct {
96 unsigned int fast:1; /* post with "channel end" */ 154 unsigned int fast:1; /* post with "channel end" */
97 unsigned int repall:1; /* report every interrupt status */ 155 unsigned int repall:1; /* report every interrupt status */
98 unsigned int pgroup:1; /* do path grouping */ 156 unsigned int pgroup:1; /* do path grouping */
99 unsigned int force:1; /* allow forced online */ 157 unsigned int force:1; /* allow forced online */
158 unsigned int mpath:1; /* do multipathing */
100 } __attribute__ ((packed)) options; 159 } __attribute__ ((packed)) options;
101 struct { 160 struct {
102 unsigned int pgid_single:1; /* use single path for Set PGID */
103 unsigned int esid:1; /* Ext. SenseID supported by HW */ 161 unsigned int esid:1; /* Ext. SenseID supported by HW */
104 unsigned int dosense:1; /* delayed SENSE required */ 162 unsigned int dosense:1; /* delayed SENSE required */
105 unsigned int doverify:1; /* delayed path verification */ 163 unsigned int doverify:1; /* delayed path verification */
106 unsigned int donotify:1; /* call notify function */ 164 unsigned int donotify:1; /* call notify function */
107 unsigned int recog_done:1; /* dev. recog. complete */ 165 unsigned int recog_done:1; /* dev. recog. complete */
108 unsigned int fake_irb:1; /* deliver faked irb */ 166 unsigned int fake_irb:1; /* deliver faked irb */
109 unsigned int intretry:1; /* retry internal operation */
110 unsigned int resuming:1; /* recognition while resume */ 167 unsigned int resuming:1; /* recognition while resume */
168 unsigned int pgroup:1; /* pathgroup is set up */
169 unsigned int mpath:1; /* multipathing is set up */
170 unsigned int initialized:1; /* set if initial reference held */
111 } __attribute__((packed)) flags; 171 } __attribute__((packed)) flags;
112 unsigned long intparm; /* user interruption parameter */ 172 unsigned long intparm; /* user interruption parameter */
113 struct qdio_irq *qdio_data; 173 struct qdio_irq *qdio_data;
@@ -115,7 +175,8 @@ struct ccw_device_private {
115 struct senseid senseid; /* SenseID info */ 175 struct senseid senseid; /* SenseID info */
116 struct pgid pgid[8]; /* path group IDs per chpid*/ 176 struct pgid pgid[8]; /* path group IDs per chpid*/
117 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ 177 struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
118 struct work_struct kick_work; 178 struct work_struct todo_work;
179 enum cdev_todo todo;
119 wait_queue_head_t wait_q; 180 wait_queue_head_t wait_q;
120 struct timer_list timer; 181 struct timer_list timer;
121 void *cmb; /* measurement information */ 182 void *cmb; /* measurement information */