Diffstat (limited to 'drivers/s390/block/dasd_eer.c')
-rw-r--r--  drivers/s390/block/dasd_eer.c  682
1 file changed, 682 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
new file mode 100644
index 000000000000..2d946b6ca074
--- /dev/null
+++ b/drivers/s390/block/dasd_eer.c
@@ -0,0 +1,682 @@
1 | /* | ||
2 | * Character device driver for extended error reporting. | ||
3 | * | ||
4 | * Copyright (C) 2005 IBM Corporation | ||
5 | * extended error reporting for DASD ECKD devices | ||
6 | * Author(s): Stefan Weinhuber <wein@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/miscdevice.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/poll.h> | ||
17 | |||
18 | #include <asm/uaccess.h> | ||
19 | #include <asm/semaphore.h> | ||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/ebcdic.h> | ||
22 | |||
23 | #include "dasd_int.h" | ||
24 | #include "dasd_eckd.h" | ||
25 | |||
26 | #ifdef PRINTK_HEADER | ||
27 | #undef PRINTK_HEADER | ||
28 | #endif /* PRINTK_HEADER */ | ||
29 | #define PRINTK_HEADER "dasd(eer):" | ||
30 | |||
31 | /* | ||
32 | * SECTION: the internal buffer | ||
33 | */ | ||
34 | |||
35 | /* | ||
36 | * The internal buffer is meant to store opaque blobs of data, so it does | ||
37 | * not know of higher level concepts like triggers. | ||
38 | * It consists of a number of pages that are used as a ringbuffer. Each data | ||
39 | * blob is stored in a simple record that consists of an integer, which | ||
40 | * contains the size of the following data, and the data bytes themselves. | ||
41 | * | ||
42 | * To allow for multiple independent readers we create one internal buffer | ||
43 | * each time the device is opened and destroy the buffer when the file is | ||
44 | * closed again. The number of pages used for this buffer is determined by | ||
45 | * the module parameter eer_pages. | ||
46 | * | ||
47 | * One record can be written to a buffer by using the functions | ||
48 | * - dasd_eer_start_record (one time per record to write the size to the | ||
49 | * buffer and reserve the space for the data) | ||
50 | * - dasd_eer_write_buffer (one or more times per record to write the data) | ||
51 | * The data can be written in several steps but you will have to compute | ||
52 | * the total size up front for the invocation of dasd_eer_start_record. | ||
53 | * If the ringbuffer is full, dasd_eer_start_record will remove the required | ||
54 | * number of old records. | ||
55 | * | ||
56 | * A record is typically read in two steps, first read the integer that | ||
57 | * specifies the size of the following data, then read the data. | ||
58 | * Both can be done by | ||
59 | * - dasd_eer_read_buffer | ||
60 | * | ||
61 | * For all mentioned functions you need to get the bufferlock first and keep | ||
62 | * it until a complete record is written or read. | ||
63 | * | ||
64 | * All information necessary to keep track of an internal buffer is kept in | ||
65 | * a struct eerbuffer. The buffer specific to a file pointer is stored in | ||
66 | * the private_data field of that file. To be able to write data to all | ||
67 | * existing buffers, each buffer is also added to the bufferlist. | ||
68 | * If the user does not want to read a complete record in one go, we have to | ||
69 | * keep track of the rest of the record. residual stores the number of bytes | ||
70 | * that still have to be delivered. If the rest of the record is invalidated | ||
71 | * between two reads, residual is set to -1 so that the next read will fail. | ||
72 | * All entries in the eerbuffer structure are protected with the bufferlock. | ||
73 | * To avoid races between writing to a buffer on the one side and creating | ||
74 | * and destroying buffers on the other side, the bufferlock must also be used | ||
75 | * to protect the bufferlist. | ||
76 | */ | ||
77 | |||
78 | static int eer_pages = 5; | ||
79 | module_param(eer_pages, int, S_IRUGO|S_IWUSR); | ||
80 | |||
81 | struct eerbuffer { | ||
82 | struct list_head list; | ||
83 | char **buffer; | ||
84 | int buffersize; | ||
85 | int buffer_page_count; | ||
86 | int head; | ||
87 | int tail; | ||
88 | int residual; | ||
89 | }; | ||
90 | |||
91 | static LIST_HEAD(bufferlist); | ||
92 | static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; | ||
93 | static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); | ||
94 | |||
95 | /* | ||
96 | * How many free bytes are available in the buffer. | ||
97 | * Needs to be called with bufferlock held. | ||
98 | */ | ||
99 | static int dasd_eer_get_free_bytes(struct eerbuffer *eerb) | ||
100 | { | ||
101 | if (eerb->head < eerb->tail) | ||
102 | return eerb->tail - eerb->head - 1; | ||
103 | return eerb->buffersize - eerb->head + eerb->tail -1; | ||
104 | } | ||
105 | |||
106 | /* | ||
107 | * How many bytes of buffer space are used. | ||
108 | * Needs to be called with bufferlock held. | ||
109 | */ | ||
110 | static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb) | ||
111 | { | ||
112 | |||
113 | if (eerb->head >= eerb->tail) | ||
114 | return eerb->head - eerb->tail; | ||
115 | return eerb->buffersize - eerb->tail + eerb->head; | ||
116 | } | ||
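As a side note on the arithmetic above: one byte of the ring is always left unused so that head == tail can unambiguously mean "empty", which makes free + filled == buffersize - 1 an invariant. A minimal consistency-check sketch built only on the two helpers above (the helper name is made up for illustration and is not part of this patch):

/* hypothetical debug helper, caller is assumed to hold bufferlock */
static void dasd_eer_check_accounting(struct eerbuffer *eerb)
{
	/* one byte stays unused, so the two counts never add up to buffersize */
	WARN_ON(dasd_eer_get_free_bytes(eerb) +
		dasd_eer_get_filled_bytes(eerb) != eerb->buffersize - 1);
}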
117 | |||
118 | /* | ||
119 | * The dasd_eer_write_buffer function just copies count bytes of data | ||
120 | * to the buffer. Call dasd_eer_start_record first to ensure that | ||
121 | * enough free space is available. | ||
122 | * Needs to be called with bufferlock held. | ||
123 | */ | ||
124 | static void dasd_eer_write_buffer(struct eerbuffer *eerb, | ||
125 | char *data, int count) | ||
126 | { | ||
127 | |||
128 | unsigned long headindex, localhead; | ||
129 | unsigned long rest, len; | ||
130 | char *nextdata; | ||
131 | |||
132 | nextdata = data; | ||
133 | rest = count; | ||
134 | while (rest > 0) { | ||
135 | headindex = eerb->head / PAGE_SIZE; | ||
136 | localhead = eerb->head % PAGE_SIZE; | ||
137 | len = min(rest, PAGE_SIZE - localhead); | ||
138 | memcpy(eerb->buffer[headindex]+localhead, nextdata, len); | ||
139 | nextdata += len; | ||
140 | rest -= len; | ||
141 | eerb->head += len; | ||
142 | if (eerb->head == eerb->buffersize) | ||
143 | eerb->head = 0; /* wrap around */ | ||
144 | BUG_ON(eerb->head > eerb->buffersize); | ||
145 | } | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Needs to be called with bufferlock held. | ||
150 | */ | ||
151 | static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count) | ||
152 | { | ||
153 | |||
154 | unsigned long tailindex, localtail; | ||
155 | unsigned long rest, len, finalcount; | ||
156 | char *nextdata; | ||
157 | |||
158 | finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); | ||
159 | nextdata = data; | ||
160 | rest = finalcount; | ||
161 | while (rest > 0) { | ||
162 | tailindex = eerb->tail / PAGE_SIZE; | ||
163 | localtail = eerb->tail % PAGE_SIZE; | ||
164 | len = min(rest, PAGE_SIZE - localtail); | ||
165 | memcpy(nextdata, eerb->buffer[tailindex] + localtail, len); | ||
166 | nextdata += len; | ||
167 | rest -= len; | ||
168 | eerb->tail += len; | ||
169 | if (eerb->tail == eerb->buffersize) | ||
170 | eerb->tail = 0; /* wrap around */ | ||
171 | BUG_ON(eerb->tail > eerb->buffersize); | ||
172 | } | ||
173 | return finalcount; | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Whenever you want to write a blob of data to the internal buffer you | ||
178 | * have to start by using this function first. It will write the number | ||
179 | * of bytes that will be written to the buffer. If necessary it will remove | ||
180 | * old records to make room for the new one. | ||
181 | * Needs to be called with bufferlock held. | ||
182 | */ | ||
183 | static int dasd_eer_start_record(struct eerbuffer *eerb, int count) | ||
184 | { | ||
185 | int tailcount; | ||
186 | |||
187 | if (count + sizeof(count) > eerb->buffersize) | ||
188 | return -ENOMEM; | ||
189 | while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { | ||
190 | if (eerb->residual > 0) { | ||
191 | eerb->tail += eerb->residual; | ||
192 | if (eerb->tail >= eerb->buffersize) | ||
193 | eerb->tail -= eerb->buffersize; | ||
194 | eerb->residual = -1; | ||
195 | } | ||
196 | dasd_eer_read_buffer(eerb, (char *) &tailcount, | ||
197 | sizeof(tailcount)); | ||
198 | eerb->tail += tailcount; | ||
199 | if (eerb->tail >= eerb->buffersize) | ||
200 | eerb->tail -= eerb->buffersize; | ||
201 | } | ||
202 | dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count)); | ||
203 | |||
204 | return 0; | ||
205 | } | ||
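To illustrate the protocol described in the buffer comment above, here is a hedged sketch of how one complete record would be written; the helper name is hypothetical, and the trigger writers further down do exactly this inline:

/* illustrative only: write one opaque blob as a single record */
static int dasd_eer_put_blob(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&bufferlock, flags);
	rc = dasd_eer_start_record(eerb, count);	/* reserve size + space */
	if (!rc)
		dasd_eer_write_buffer(eerb, data, count);	/* copy payload */
	spin_unlock_irqrestore(&bufferlock, flags);
	return rc;
}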
206 | |||
207 | /* | ||
208 | * Release pages that are not used anymore. | ||
209 | */ | ||
210 | static void dasd_eer_free_buffer_pages(char **buf, int no_pages) | ||
211 | { | ||
212 | int i; | ||
213 | |||
214 | for (i = 0; i < no_pages; i++) | ||
215 | free_page((unsigned long) buf[i]); | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * Allocate a new set of memory pages. | ||
220 | */ | ||
221 | static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages) | ||
222 | { | ||
223 | int i; | ||
224 | |||
225 | for (i = 0; i < no_pages; i++) { | ||
226 | buf[i] = (char *) get_zeroed_page(GFP_KERNEL); | ||
227 | if (!buf[i]) { | ||
228 | dasd_eer_free_buffer_pages(buf, i); | ||
229 | return -ENOMEM; | ||
230 | } | ||
231 | } | ||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | /* | ||
236 | * SECTION: The extended error reporting functionality | ||
237 | */ | ||
238 | |||
239 | /* | ||
240 | * When a DASD device driver wants to report an error, it calls the | ||
241 | * function dasd_eer_write and gives the respective trigger ID as | ||
242 | * parameter. Currently there are four kinds of triggers: | ||
243 | * | ||
244 | * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems | ||
245 | * DASD_EER_PPRCSUSPEND: PPRC was suspended | ||
246 | * DASD_EER_NOPATH: There is no path to the device left. | ||
247 | * DASD_EER_STATECHANGE: The state of the device has changed. | ||
248 | * | ||
249 | * For the first three triggers all required information can be supplied by | ||
250 | * the caller. For these triggers a record is written by the function | ||
251 | * dasd_eer_write_standard_trigger. | ||
252 | * | ||
253 | * The DASD_EER_STATECHANGE trigger is special since a sense subsystem | ||
254 | * status ccw needs to be executed to gather the necessary sense data first. | ||
255 | * The dasd_eer_snss function will queue the SNSS request and the request | ||
256 | * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE | ||
257 | * trigger. | ||
258 | * | ||
259 | * To avoid memory allocations at runtime, the necessary memory is allocated | ||
260 | * when the extended error reporting is enabled for a device (by | ||
261 | * dasd_eer_probe). There is one sense subsystem status request for each | ||
262 | * eer enabled DASD device. The presence of the cqr in device->eer_cqr | ||
263 | * indicates that eer is enabled for the device. The use of the snss request | ||
264 | * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates | ||
265 | * that the cqr is currently in use, dasd_eer_snss cannot start a second | ||
266 | * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of | ||
267 | * the SNSS request will check the bit and call dasd_eer_snss again. | ||
268 | */ | ||
269 | |||
270 | #define SNSS_DATA_SIZE 44 | ||
271 | |||
272 | #define DASD_EER_BUSID_SIZE 10 | ||
273 | struct dasd_eer_header { | ||
274 | __u32 total_size; | ||
275 | __u32 trigger; | ||
276 | __u64 tv_sec; | ||
277 | __u64 tv_usec; | ||
278 | char busid[DASD_EER_BUSID_SIZE]; | ||
279 | }; | ||
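For reference, a hedged user space sketch of how such records could be consumed: each read() on the misc device returns at most one record, which starts with this header, followed by the sense or SNSS data and a terminating "EOR" string. The device node path and the mirrored structure below are assumptions for illustration, not taken from this patch:

/* user space example, assuming the device node is /dev/dasd_eer */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

struct eer_record_header {		/* mirrors struct dasd_eer_header */
	uint32_t total_size;
	uint32_t trigger;
	uint64_t tv_sec;
	uint64_t tv_usec;
	char busid[10];
};

int main(void)
{
	struct eer_record_header header;
	char record[4096];
	ssize_t len;
	int fd;

	fd = open("/dev/dasd_eer", O_RDONLY);
	if (fd < 0)
		return 1;
	/* each successful read delivers (at most) one complete record */
	while ((len = read(fd, record, sizeof(record))) > 0) {
		if ((size_t) len < sizeof(header))
			continue;
		memcpy(&header, record, sizeof(header));
		printf("trigger %u on busid %.10s, %zd bytes\n",
		       (unsigned int) header.trigger, header.busid, len);
	}
	close(fd);
	return 0;
}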
280 | |||
281 | /* | ||
282 | * The following function can be used for those triggers that have | ||
283 | * all necessary data available when the function is called. | ||
284 | * If the parameter cqr is not NULL, the chain of requests will be searched | ||
285 | * for valid sense data, and all valid sense data sets will be added to | ||
286 | * the triggers data. | ||
287 | */ | ||
288 | static void dasd_eer_write_standard_trigger(struct dasd_device *device, | ||
289 | struct dasd_ccw_req *cqr, | ||
290 | int trigger) | ||
291 | { | ||
292 | struct dasd_ccw_req *temp_cqr; | ||
293 | int data_size; | ||
294 | struct timeval tv; | ||
295 | struct dasd_eer_header header; | ||
296 | unsigned long flags; | ||
297 | struct eerbuffer *eerb; | ||
298 | |||
299 | /* go through cqr chain and count the valid sense data sets */ | ||
300 | data_size = 0; | ||
301 | for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) | ||
302 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
303 | data_size += 32; | ||
304 | |||
305 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
306 | header.trigger = trigger; | ||
307 | do_gettimeofday(&tv); | ||
308 | header.tv_sec = tv.tv_sec; | ||
309 | header.tv_usec = tv.tv_usec; | ||
310 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
311 | |||
312 | spin_lock_irqsave(&bufferlock, flags); | ||
313 | list_for_each_entry(eerb, &bufferlist, list) { | ||
314 | dasd_eer_start_record(eerb, header.total_size); | ||
315 | dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); | ||
316 | for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) | ||
317 | if (temp_cqr->irb.esw.esw0.erw.cons) | ||
318 | dasd_eer_write_buffer(eerb, temp_cqr->irb.ecw, 32); | ||
319 | dasd_eer_write_buffer(eerb, "EOR", 4); | ||
320 | } | ||
321 | spin_unlock_irqrestore(&bufferlock, flags); | ||
322 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
323 | } | ||
324 | |||
325 | /* | ||
326 | * This function writes a DASD_EER_STATECHANGE trigger. | ||
327 | */ | ||
328 | static void dasd_eer_write_snss_trigger(struct dasd_device *device, | ||
329 | struct dasd_ccw_req *cqr, | ||
330 | int trigger) | ||
331 | { | ||
332 | int data_size; | ||
333 | int snss_rc; | ||
334 | struct timeval tv; | ||
335 | struct dasd_eer_header header; | ||
336 | unsigned long flags; | ||
337 | struct eerbuffer *eerb; | ||
338 | |||
339 | snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; | ||
340 | if (snss_rc) | ||
341 | data_size = 0; | ||
342 | else | ||
343 | data_size = SNSS_DATA_SIZE; | ||
344 | |||
345 | header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ | ||
346 | header.trigger = DASD_EER_STATECHANGE; | ||
347 | do_gettimeofday(&tv); | ||
348 | header.tv_sec = tv.tv_sec; | ||
349 | header.tv_usec = tv.tv_usec; | ||
350 | strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); | ||
351 | |||
352 | spin_lock_irqsave(&bufferlock, flags); | ||
353 | list_for_each_entry(eerb, &bufferlist, list) { | ||
354 | dasd_eer_start_record(eerb, header.total_size); | ||
355 | dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header)); | ||
356 | if (!snss_rc) | ||
357 | dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE); | ||
358 | dasd_eer_write_buffer(eerb, "EOR", 4); | ||
359 | } | ||
360 | spin_unlock_irqrestore(&bufferlock, flags); | ||
361 | wake_up_interruptible(&dasd_eer_read_wait_queue); | ||
362 | } | ||
363 | |||
364 | /* | ||
365 | * This function is called for all triggers. It calls the appropriate | ||
366 | * function that writes the actual trigger records. | ||
367 | */ | ||
368 | void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr, | ||
369 | unsigned int id) | ||
370 | { | ||
371 | if (!device->eer_cqr) | ||
372 | return; | ||
373 | switch (id) { | ||
374 | case DASD_EER_FATALERROR: | ||
375 | case DASD_EER_PPRCSUSPEND: | ||
376 | dasd_eer_write_standard_trigger(device, cqr, id); | ||
377 | break; | ||
378 | case DASD_EER_NOPATH: | ||
379 | dasd_eer_write_standard_trigger(device, NULL, id); | ||
380 | break; | ||
381 | case DASD_EER_STATECHANGE: | ||
382 | dasd_eer_write_snss_trigger(device, cqr, id); | ||
383 | break; | ||
384 | default: /* unknown trigger, so we write it without any sense data */ | ||
385 | dasd_eer_write_standard_trigger(device, NULL, id); | ||
386 | break; | ||
387 | } | ||
388 | } | ||
389 | EXPORT_SYMBOL(dasd_eer_write); | ||
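As a usage illustration (an assumption about the call site, not code from this patch), the DASD core or a discipline would report, for example, a fatal error roughly like this once error recovery has given up on a request:

/* illustrative caller; dasd_eer_write is a no-op while eer is disabled */
static void example_report_fatal(struct dasd_device *device,
				 struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
}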
390 | |||
391 | /* | ||
392 | * Start a sense subsystem status request. | ||
393 | * Needs to be called with the device held. | ||
394 | */ | ||
395 | void dasd_eer_snss(struct dasd_device *device) | ||
396 | { | ||
397 | struct dasd_ccw_req *cqr; | ||
398 | |||
399 | cqr = device->eer_cqr; | ||
400 | if (!cqr) /* Device not eer enabled. */ | ||
401 | return; | ||
402 | if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) { | ||
403 | /* Sense subsystem status request in use. */ | ||
404 | set_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
405 | return; | ||
406 | } | ||
407 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
408 | cqr->status = DASD_CQR_QUEUED; | ||
409 | list_add(&cqr->list, &device->ccw_queue); | ||
410 | dasd_schedule_bh(device); | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Callback function for use with sense subsystem status request. | ||
415 | */ | ||
416 | static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) | ||
417 | { | ||
418 | struct dasd_device *device = cqr->device; | ||
419 | unsigned long flags; | ||
420 | |||
421 | dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); | ||
422 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
423 | if (device->eer_cqr == cqr) { | ||
424 | clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); | ||
425 | if (test_bit(DASD_FLAG_EER_SNSS, &device->flags)) | ||
426 | /* Another SNSS has been requested in the meantime. */ | ||
427 | dasd_eer_snss(device); | ||
428 | cqr = NULL; | ||
429 | } | ||
430 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
431 | if (cqr) | ||
432 | /* | ||
433 | * Extended error reporting has been switched off while | ||
434 | * the SNSS request was running. It could even have | ||
435 | * been switched off and on again in which case there | ||
436 | * is a new ccw in device->eer_cqr. Free the "old" | ||
437 | * snss request now. | ||
438 | */ | ||
439 | dasd_kfree_request(cqr, device); | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Enable error reporting on a given device. | ||
444 | */ | ||
445 | int dasd_eer_enable(struct dasd_device *device) | ||
446 | { | ||
447 | struct dasd_ccw_req *cqr; | ||
448 | unsigned long flags; | ||
449 | |||
450 | if (device->eer_cqr) | ||
451 | return 0; | ||
452 | |||
453 | if (!device->discipline || strcmp(device->discipline->name, "ECKD")) | ||
454 | return -EPERM; /* FIXME: -EMEDIUMTYPE ? */ | ||
455 | |||
456 | cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */, | ||
457 | SNSS_DATA_SIZE, device); | ||
458 | if (!cqr) | ||
459 | return -ENOMEM; | ||
460 | |||
461 | cqr->device = device; | ||
462 | cqr->retries = 255; | ||
463 | cqr->expires = 10 * HZ; | ||
464 | |||
465 | cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; | ||
466 | cqr->cpaddr->count = SNSS_DATA_SIZE; | ||
467 | cqr->cpaddr->flags = 0; | ||
468 | cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; | ||
469 | |||
470 | cqr->buildclk = get_clock(); | ||
471 | cqr->status = DASD_CQR_FILLED; | ||
472 | cqr->callback = dasd_eer_snss_cb; | ||
473 | |||
474 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
475 | if (!device->eer_cqr) { | ||
476 | device->eer_cqr = cqr; | ||
477 | cqr = NULL; | ||
478 | } | ||
479 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
480 | if (cqr) | ||
481 | dasd_kfree_request(cqr, device); | ||
482 | return 0; | ||
483 | } | ||
484 | |||
485 | /* | ||
486 | * Disable error reporting on a given device. | ||
487 | */ | ||
488 | void dasd_eer_disable(struct dasd_device *device) | ||
489 | { | ||
490 | struct dasd_ccw_req *cqr; | ||
491 | unsigned long flags; | ||
492 | int in_use; | ||
493 | |||
494 | if (!device->eer_cqr) | ||
495 | return; | ||
496 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | ||
497 | cqr = device->eer_cqr; | ||
498 | device->eer_cqr = NULL; | ||
499 | clear_bit(DASD_FLAG_EER_SNSS, &device->flags); | ||
500 | in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags); | ||
501 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | ||
502 | if (cqr && !in_use) | ||
503 | dasd_kfree_request(cqr, device); | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * SECTION: the device operations | ||
508 | */ | ||
509 | |||
510 | /* | ||
511 | * We need the bufferlock to access the internal buffer, but copy_to_user | ||
512 | * may sleep and must not be called while holding a spinlock. So the data is | ||
513 | * first copied into a readbuffer, which is protected by the readbuffer_mutex. | ||
514 | */ | ||
515 | static char readbuffer[PAGE_SIZE]; | ||
516 | static DECLARE_MUTEX(readbuffer_mutex); | ||
517 | |||
518 | static int dasd_eer_open(struct inode *inp, struct file *filp) | ||
519 | { | ||
520 | struct eerbuffer *eerb; | ||
521 | unsigned long flags; | ||
522 | |||
523 | eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL); | ||
    | if (!eerb) | ||
    | return -ENOMEM; | ||
524 | eerb->buffer_page_count = eer_pages; | ||
525 | if (eerb->buffer_page_count < 1 || | ||
526 | eerb->buffer_page_count > INT_MAX / PAGE_SIZE) { | ||
527 | kfree(eerb); | ||
528 | MESSAGE(KERN_WARNING, "can't open device since module " | ||
529 | "parameter eer_pages is smaller then 1 or" | ||
530 | " bigger then %d", (int)(INT_MAX / PAGE_SIZE)); | ||
531 | return -EINVAL; | ||
532 | } | ||
533 | eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; | ||
534 | eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *), | ||
535 | GFP_KERNEL); | ||
536 | if (!eerb->buffer) { | ||
537 | kfree(eerb); | ||
538 | return -ENOMEM; | ||
539 | } | ||
540 | if (dasd_eer_allocate_buffer_pages(eerb->buffer, | ||
541 | eerb->buffer_page_count)) { | ||
542 | kfree(eerb->buffer); | ||
543 | kfree(eerb); | ||
544 | return -ENOMEM; | ||
545 | } | ||
546 | filp->private_data = eerb; | ||
547 | spin_lock_irqsave(&bufferlock, flags); | ||
548 | list_add(&eerb->list, &bufferlist); | ||
549 | spin_unlock_irqrestore(&bufferlock, flags); | ||
550 | |||
551 | return nonseekable_open(inp, filp); | ||
552 | } | ||
553 | |||
554 | static int dasd_eer_close(struct inode *inp, struct file *filp) | ||
555 | { | ||
556 | struct eerbuffer *eerb; | ||
557 | unsigned long flags; | ||
558 | |||
559 | eerb = (struct eerbuffer *) filp->private_data; | ||
560 | spin_lock_irqsave(&bufferlock, flags); | ||
561 | list_del(&eerb->list); | ||
562 | spin_unlock_irqrestore(&bufferlock, flags); | ||
563 | dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); | ||
564 | kfree(eerb->buffer); | ||
565 | kfree(eerb); | ||
566 | |||
567 | return 0; | ||
568 | } | ||
569 | |||
570 | static ssize_t dasd_eer_read(struct file *filp, char __user *buf, | ||
571 | size_t count, loff_t *ppos) | ||
572 | { | ||
573 | int tc, rc; | ||
574 | int tailcount, effective_count; | ||
575 | unsigned long flags; | ||
576 | struct eerbuffer *eerb; | ||
577 | |||
578 | eerb = (struct eerbuffer *) filp->private_data; | ||
579 | if (down_interruptible(&readbuffer_mutex)) | ||
580 | return -ERESTARTSYS; | ||
581 | |||
582 | spin_lock_irqsave(&bufferlock, flags); | ||
583 | |||
584 | if (eerb->residual < 0) { | ||
585 | /* the remainder of this record has been deleted */ | ||
586 | eerb->residual = 0; | ||
587 | spin_unlock_irqrestore(&bufferlock, flags); | ||
588 | up(&readbuffer_mutex); | ||
589 | return -EIO; | ||
590 | } else if (eerb->residual > 0) { | ||
591 | /* OK we still have a second half of a record to deliver */ | ||
592 | effective_count = min(eerb->residual, (int) count); | ||
593 | eerb->residual -= effective_count; | ||
594 | } else { | ||
595 | tc = 0; | ||
596 | while (!tc) { | ||
597 | tc = dasd_eer_read_buffer(eerb, (char *) &tailcount, | ||
598 | sizeof(tailcount)); | ||
599 | if (!tc) { | ||
600 | /* no data available */ | ||
601 | spin_unlock_irqrestore(&bufferlock, flags); | ||
602 | up(&readbuffer_mutex); | ||
603 | if (filp->f_flags & O_NONBLOCK) | ||
604 | return -EAGAIN; | ||
605 | rc = wait_event_interruptible( | ||
606 | dasd_eer_read_wait_queue, | ||
607 | eerb->head != eerb->tail); | ||
608 | if (rc) | ||
609 | return rc; | ||
610 | if (down_interruptible(&readbuffer_mutex)) | ||
611 | return -ERESTARTSYS; | ||
612 | spin_lock_irqsave(&bufferlock, flags); | ||
613 | } | ||
614 | } | ||
615 | WARN_ON(tc != sizeof(tailcount)); | ||
616 | effective_count = min(tailcount, (int) count); | ||
617 | eerb->residual = tailcount - effective_count; | ||
618 | } | ||
619 | |||
620 | tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count); | ||
621 | WARN_ON(tc != effective_count); | ||
622 | |||
623 | spin_unlock_irqrestore(&bufferlock, flags); | ||
624 | |||
625 | if (copy_to_user(buf, readbuffer, effective_count)) { | ||
626 | up(&readbuffer_mutex); | ||
627 | return -EFAULT; | ||
628 | } | ||
629 | |||
630 | up(&readbuffer_mutex); | ||
631 | return effective_count; | ||
632 | } | ||
633 | |||
634 | static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable) | ||
635 | { | ||
636 | unsigned int mask; | ||
637 | unsigned long flags; | ||
638 | struct eerbuffer *eerb; | ||
639 | |||
640 | eerb = (struct eerbuffer *) filp->private_data; | ||
641 | poll_wait(filp, &dasd_eer_read_wait_queue, ptable); | ||
642 | spin_lock_irqsave(&bufferlock, flags); | ||
643 | if (eerb->head != eerb->tail) | ||
644 | mask = POLLIN | POLLRDNORM; | ||
645 | else | ||
646 | mask = 0; | ||
647 | spin_unlock_irqrestore(&bufferlock, flags); | ||
648 | return mask; | ||
649 | } | ||
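Because poll support is provided, a user space reader does not have to block in read(); a hedged sketch (helper name made up for illustration, not part of the kernel code) that waits with poll(2) before reading a record:

#include <poll.h>
#include <unistd.h>

/* wait until a record is available, then read it; returns -1 on failure */
static ssize_t wait_and_read_record(int fd, char *buf, size_t size)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
		return -1;
	return read(fd, buf, size);
}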
650 | |||
651 | static struct file_operations dasd_eer_fops = { | ||
652 | .open = &dasd_eer_open, | ||
653 | .release = &dasd_eer_close, | ||
654 | .read = &dasd_eer_read, | ||
655 | .poll = &dasd_eer_poll, | ||
656 | .owner = THIS_MODULE, | ||
657 | }; | ||
658 | |||
659 | static struct miscdevice dasd_eer_dev = { | ||
660 | .minor = MISC_DYNAMIC_MINOR, | ||
661 | .name = "dasd_eer", | ||
662 | .fops = &dasd_eer_fops, | ||
663 | }; | ||
664 | |||
665 | int __init dasd_eer_init(void) | ||
666 | { | ||
667 | int rc; | ||
668 | |||
669 | rc = misc_register(&dasd_eer_dev); | ||
670 | if (rc) { | ||
671 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | ||
672 | "register misc device"); | ||
673 | return rc; | ||
674 | } | ||
675 | |||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | void __exit dasd_eer_exit(void) | ||
680 | { | ||
681 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | ||
682 | } | ||