Diffstat (limited to 'drivers/s390/cio/ccwreq.c')
-rw-r--r-- | drivers/s390/cio/ccwreq.c | 328
1 file changed, 328 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
new file mode 100644
index 000000000000..9509e3860934
--- /dev/null
+++ b/drivers/s390/cio/ccwreq.c
@@ -0,0 +1,328 @@
/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
	while (lpm && ((lpm & mask) == 0))
		lpm >>= 1;
	return lpm;
}
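
/*
 * Worked example (illustrative values only): with an available-path
 * mask of 0x28 (path positions 2 and 4), lpm_adjust(0x80, 0x28) shifts
 * the lpm right twice and returns 0x20; lpm_adjust(0x10, 0x28) returns
 * 0x08; lpm_adjust(0x04, 0x28) runs out of bits and returns 0.
 */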

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);

	return req->mask;
}

/*
 * Clean up device state and report the final result to the callback. A
 * pending deferred return code (req->drc) overrides any transient error
 * except a permanent device error (-ENODEV).
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	if (req->done)
		return;
	req->done = 1;
	ccw_device_set_timeout(cdev, 0);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	sch->lpm = sch->schib.pmcw.pam;
	if (rc && rc != -ENODEV && req->drc)
		rc = req->drc;
	req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw1 *cp = req->cp;
	int rc = -EACCES;

	while (req->mask) {
		if (req->retries-- == 0) {
			/* Retries exhausted, try next path. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Perform start function. */
		sch->lpm = 0xff;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		rc = cio_start(sch, cp, (u8) req->mask);
		if (rc == 0) {
			/* I/O started successfully. */
			ccw_device_set_timeout(cdev, req->timeout);
			return;
		}
		if (rc == -ENODEV) {
			/* Permanent device error. */
			break;
		}
		if (rc == -EACCES) {
			/* Permanent path error. */
			ccwreq_next_path(cdev);
			continue;
		}
		/* Temporary improper status. */
		rc = cio_clear(sch);
		if (rc)
			break;
		return;
	}
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->private->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/*
	 * Try all paths twice to counter link flapping: the 16-bit mask
	 * starts with bits 15 and 7 set, so as it is shifted right the
	 * low byte - the part actually passed to cio_start() - walks
	 * through each of the eight path positions twice.
	 */
	req->mask = 0x8080;
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask, req->lpm);
	req->drc = 0;
	req->done = 0;
	req->cancel = 0;
	if (!req->mask)
		goto out_nopath;
	ccwreq_do(cdev);
	return;

out_nopath:
	ccwreq_stop(cdev, -EACCES);
}
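
/*
 * A minimal caller sketch (hypothetical; names and values are
 * illustrative only - see struct ccw_request in io_sch.h for the
 * fields): fill in the channel program, per-start timeout, per-path
 * retry count, usable path mask and completion callback, then start
 * the request:
 *
 *	struct ccw_request *req = &cdev->private->req;
 *
 *	req->cp         = cp;
 *	req->timeout    = 10 * HZ;
 *	req->maxretries = 5;
 *	req->lpm        = sch->schib.pmcw.pam;
 *	req->callback   = mydrv_callback;
 *	req->data       = mydrv_data;
 *	ccw_request_start(cdev);
 *
 * mydrv_callback(cdev, mydrv_data, rc) is then invoked exactly once
 * with the final result code.
 */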

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->private->req. Return non-zero if
 * the request has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (req->done)
		return 1;
	req->cancel = 1;
	rc = cio_clear(sch);
	if (rc)
		ccwreq_stop(cdev, rc);
	return 0;
}
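
/*
 * Caller sketch (hypothetical): a zero return means the clear was
 * issued and the callback will still report the final result, so the
 * caller keeps waiting for it; non-zero means the request had already
 * finished. mydrv_done is an illustrative completion that the
 * caller's callback completes:
 *
 *	if (!ccw_request_cancel(cdev))
 *		wait_for_completion(&mydrv_done);
 */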

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
	struct irb *irb = &cdev->private->irb;
	struct cmd_scsw *scsw = &irb->scsw.cmd;

	/* Perform BASIC SENSE if needed. */
	if (ccw_device_accumulate_and_sense(cdev, lcirb))
		return IO_RUNNING;
	/* Check for halt/clear interrupt. */
	if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
		return IO_KILLED;
	/* Check for path error. */
	if (scsw->cc == 3 || scsw->pno)
		return IO_PATH_ERROR;
	/* Handle BASIC SENSE data. */
	if (irb->esw.esw0.erw.cons) {
		CIO_TRACE_EVENT(2, "sensedata");
		CIO_HEX_EVENT(2, &cdev->private->dev_id,
			      sizeof(struct ccw_dev_id));
		CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
		/* Check for command reject. */
		if (irb->ecw[0] & SNS0_CMD_REJECT)
			return IO_REJECTED;
		/* Assume that unexpected SENSE data implies an error. */
		return IO_STATUS_ERROR;
	}
	/* Check for channel errors. */
	if (scsw->cstat != 0)
		return IO_STATUS_ERROR;
	/* Check for device errors. */
	if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return IO_STATUS_ERROR;
	/* Check for final state. */
	if (!(scsw->dstat & DEV_STAT_DEV_END))
		return IO_RUNNING;
	/* Check for other improper status. */
	if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
		return IO_STATUS_ERROR;
	return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
	struct ccw_request *req = &cdev->private->req;
	struct {
		struct ccw_dev_id dev_id;
		u16 retries;
		u8 lpm;
		u8 status;
	} __attribute__ ((packed)) data;
	data.dev_id = cdev->private->dev_id;
	data.retries = req->retries;
	data.lpm = (u8) req->mask;
	data.status = (u8) status;
	CIO_TRACE_EVENT(2, "reqstat");
	CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct irb *irb = (struct irb *) __LC_IRB;
	enum io_status status;
	int rc = -EOPNOTSUPP;

	/* Check status of I/O request. */
	status = ccwreq_status(cdev, irb);
	if (req->filter)
		status = req->filter(cdev, req->data, irb, status);
	if (status != IO_RUNNING)
		ccw_device_set_timeout(cdev, 0);
	if (status != IO_DONE && status != IO_RUNNING)
		ccwreq_log_status(cdev, status);
	switch (status) {
	case IO_DONE:
		break;
	case IO_RUNNING:
		return;
	case IO_REJECTED:
		goto err;
	case IO_PATH_ERROR:
		goto out_next_path;
	case IO_STATUS_ERROR:
		goto out_restart;
	case IO_KILLED:
		/* Check if request was cancelled on purpose. */
		if (req->cancel) {
			rc = -EIO;
			goto err;
		}
		goto out_restart;
	}
	/* Check back with request initiator. */
	if (!req->check)
		goto out;
	switch (req->check(cdev, req->data)) {
	case 0:
		break;
	case -EAGAIN:
		goto out_restart;
	case -EACCES:
		goto out_next_path;
	default:
		goto err;
	}
out:
	ccwreq_stop(cdev, 0);
	return;

out_next_path:
	/* Try next path and restart I/O. */
	if (!ccwreq_next_path(cdev)) {
		rc = -EACCES;
		goto err;
	}
out_restart:
	/* Restart. */
	ccwreq_do(cdev);
	return;
err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int rc;

	if (!ccwreq_next_path(cdev)) {
		/* Set the final return code for this request. */
		req->drc = -ETIME;
	}
	rc = cio_clear(sch);
	if (rc)
		goto err;
	return;

err:
	ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle a not-operational (notoper) condition during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
	ccwreq_stop(cdev, -ENODEV);
}