author		Peter Oberparleiter <peter.oberparleiter@de.ibm.com>	2009-12-07 06:51:25 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2009-12-07 06:51:31 -0500
commit		e1f0fbd655539b0093738f58d57db83a0ac2dd6c
tree		2f0d37096b2cd563e113fd0b1a378bc8387ba8a9	/drivers/s390/cio/ccwreq.c
parent		16b9a0571da4ee5cd15ca75e871722b0b5aee64d
[S390] cio: consistent infrastructure for internal I/O requests
Reduce code duplication by introducing a central infrastructure to
perform an internal I/O operation on a CCW device.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
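
As a reading aid, not part of the commit: a minimal sketch of how a caller might drive this infrastructure, assuming hypothetical names my_done and my_start and an illustrative channel program. A real initiator fills in the ccw_request embedded in the device's private data, hands control to ccw_request_start() and waits for the callback.

/* Hypothetical caller (illustrative only, not in this patch). */
static void my_done(struct ccw_device *cdev, void *data, int rc)
{
        /* rc == 0 on success, -EACCES when all paths failed, -ETIME after
         * a timeout, -ENODEV when the device became inoperable. */
}

static void my_start(struct ccw_device *cdev, struct ccw1 *cp)
{
        struct ccw_request *req = &cdev->private->req;

        memset(req, 0, sizeof(*req));
        req->cp         = cp;           /* channel program to start */
        req->timeout    = 5 * HZ;       /* timeout per started I/O */
        req->maxretries = 5;            /* retries per path */
        req->lpm        = 0x80;         /* logical path mask to use */
        req->callback   = my_done;      /* completion callback */
        ccw_request_start(cdev);
}
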
Diffstat (limited to 'drivers/s390/cio/ccwreq.c')
-rw-r--r--	drivers/s390/cio/ccwreq.c	327
1 file changed, 327 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..a6e205a384b2
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,327 @@
/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
        while (lpm && ((lpm & mask) == 0))
                lpm >>= 1;
        return lpm;
}
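
/*
 * Example (illustrative, not part of the original file): with lpm = 0x80
 * and mask = 0x24, lpm is shifted 0x80 -> 0x40 -> 0x20; 0x20 & 0x24 is
 * non-zero, so 0x20 is returned. If no bit of @mask is ever hit, e.g.
 * lpm_adjust(0x80, 0x00), the result is 0.
 */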

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        req->retries = req->maxretries;
        req->mask = lpm_adjust(req->mask >> 1, req->lpm);

        return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;

        if (req->done)
                return;
        req->done = 1;
        ccw_device_set_timeout(cdev, 0);
        memset(&cdev->private->irb, 0, sizeof(struct irb));
        sch->lpm = sch->schib.pmcw.pam;
        if (rc && rc != -ENODEV && req->drc)
                rc = req->drc;
        req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw1 *cp = req->cp;
        int rc = -EACCES;

        while (req->mask) {
                if (req->retries-- == 0) {
                        /* Retries exhausted, try next path. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Perform start function. */
                sch->lpm = 0xff;
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                rc = cio_start(sch, cp, req->mask);
                if (rc == 0) {
                        /* I/O started successfully. */
                        ccw_device_set_timeout(cdev, req->timeout);
                        return;
                }
                if (rc == -ENODEV) {
                        /* Permanent device error. */
                        break;
                }
                if (rc == -EACCES) {
                        /* Permanent path error. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Temporary improper status. */
                rc = cio_clear(sch);
                if (rc)
                        break;
                return;
        }
        ccwreq_stop(cdev, rc);
}
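
/*
 * Illustrative trace (not in the original file): with req->maxretries = 2
 * and req->mask = 0x80, a failing start is attempted up to two more times
 * on the current path; ccwreq_next_path() then resets the retry counter
 * and shifts req->mask to the next path permitted by req->lpm. When no
 * path remains, the request is stopped with the last error code.
 */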

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->private->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        req->mask = 0x80;
        req->retries = req->maxretries;
        req->mask = lpm_adjust(req->mask, req->lpm);
        req->drc = 0;
        req->done = 0;
        req->cancel = 0;
        if (!req->mask)
                goto out_nopath;
        ccwreq_do(cdev);
        return;

out_nopath:
        ccwreq_stop(cdev, -EACCES);
}

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->private->req. Return non-zero if
 * the request has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc;

        if (req->done)
                return 1;
        req->cancel = 1;
        rc = cio_clear(sch);
        if (rc)
                ccwreq_stop(cdev, rc);
        return 0;
}
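
/*
 * Illustrative note (not in the original file): when ccw_request_cancel()
 * returns 0, the initiator must still wait for the request callback; the
 * IO_KILLED handling in ccw_request_handler() below completes a cancelled
 * request with rc == -EIO.
 */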

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
        struct irb *irb = &cdev->private->irb;
        struct cmd_scsw *scsw = &irb->scsw.cmd;

        /* Perform BASIC SENSE if needed. */
        if (ccw_device_accumulate_and_sense(cdev, lcirb))
                return IO_RUNNING;
        /* Check for halt/clear interrupt. */
        if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
                return IO_KILLED;
        /* Check for path error. */
        if (scsw->cc == 3 || scsw->pno)
                return IO_PATH_ERROR;
        /* Handle BASIC SENSE data. */
        if (irb->esw.esw0.erw.cons) {
                CIO_TRACE_EVENT(2, "sensedata");
                CIO_HEX_EVENT(2, &cdev->private->dev_id,
                              sizeof(struct ccw_dev_id));
                CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
                /* Check for command reject. */
                if (irb->ecw[0] & SNS0_CMD_REJECT)
                        return IO_REJECTED;
                /* Assume that unexpected SENSE data implies an error. */
                return IO_STATUS_ERROR;
        }
        /* Check for channel errors. */
        if (scsw->cstat != 0)
                return IO_STATUS_ERROR;
        /* Check for device errors. */
        if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return IO_STATUS_ERROR;
        /* Check for final state. */
        if (!(scsw->dstat & DEV_STAT_DEV_END))
                return IO_RUNNING;
        /* Check for other improper status. */
        if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
                return IO_STATUS_ERROR;
        return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
        struct ccw_request *req = &cdev->private->req;
        struct {
                struct ccw_dev_id dev_id;
                u16 retries;
                u8 lpm;
                u8 status;
        } __attribute__ ((packed)) data;
        data.dev_id = cdev->private->dev_id;
        data.retries = req->retries;
        data.lpm = req->mask;
        data.status = (u8) status;
        CIO_TRACE_EVENT(2, "reqstat");
        CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct irb *irb = (struct irb *) __LC_IRB;
        enum io_status status;
        int rc = -EOPNOTSUPP;

        /* Check status of I/O request. */
        status = ccwreq_status(cdev, irb);
        if (req->filter)
                status = req->filter(cdev, req->data, irb, status);
        if (status != IO_RUNNING)
                ccw_device_set_timeout(cdev, 0);
        if (status != IO_DONE && status != IO_RUNNING)
                ccwreq_log_status(cdev, status);
        switch (status) {
        case IO_DONE:
                break;
        case IO_RUNNING:
                return;
        case IO_REJECTED:
                goto err;
        case IO_PATH_ERROR:
                goto out_next_path;
        case IO_STATUS_ERROR:
                goto out_restart;
        case IO_KILLED:
                /* Check if request was cancelled on purpose. */
                if (req->cancel) {
                        rc = -EIO;
                        goto err;
                }
                goto out_restart;
        }
        /* Check back with request initiator. */
        if (!req->check)
                goto out;
        switch (req->check(cdev, req->data)) {
        case 0:
                break;
        case -EAGAIN:
                goto out_restart;
        case -EACCES:
                goto out_next_path;
        default:
                goto err;
        }
out:
        ccwreq_stop(cdev, 0);
        return;

out_next_path:
        /* Try next path and restart I/O. */
        if (!ccwreq_next_path(cdev)) {
                rc = -EACCES;
                goto err;
        }
out_restart:
        /* Restart. */
        ccwreq_do(cdev);
        return;
err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc;

        if (!ccwreq_next_path(cdev)) {
                /* set the final return code for this request */
                req->drc = -ETIME;
        }
        rc = cio_clear(sch);
        if (rc)
                goto err;
        return;

err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
        ccwreq_stop(cdev, -ENODEV);
}
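
For context, a hedged sketch of how an initiator might plug into the check hook that ccw_request_handler() consults; the names my_check and struct my_state are hypothetical, and real users carry their own state through req->data.

/* Hypothetical check callback (illustrative only). Returning 0 completes
 * the request, -EAGAIN restarts it, -EACCES moves on to the next path, and
 * any other value stops the request with that return code (see the second
 * switch in ccw_request_handler() above). */
static int my_check(struct ccw_device *cdev, void *data)
{
        struct my_state *st = data;     /* hypothetical per-request state */

        if (st->needs_another_round)
                return -EAGAIN;         /* restart the operation */
        if (st->wrong_path)
                return -EACCES;         /* try the next path */
        return 0;                       /* done; callback gets rc == 0 */
}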