aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/block
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/block
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/s390/block')
-rw-r--r--drivers/s390/block/Kconfig68
-rw-r--r--drivers/s390/block/Makefile17
-rw-r--r--drivers/s390/block/dasd.c2065
-rw-r--r--drivers/s390/block/dasd_3370_erp.c104
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2742
-rw-r--r--drivers/s390/block/dasd_9336_erp.c61
-rw-r--r--drivers/s390/block/dasd_9343_erp.c22
-rw-r--r--drivers/s390/block/dasd_cmb.c145
-rw-r--r--drivers/s390/block/dasd_devmap.c772
-rw-r--r--drivers/s390/block/dasd_diag.c541
-rw-r--r--drivers/s390/block/dasd_diag.h66
-rw-r--r--drivers/s390/block/dasd_eckd.c1722
-rw-r--r--drivers/s390/block/dasd_eckd.h346
-rw-r--r--drivers/s390/block/dasd_erp.c254
-rw-r--r--drivers/s390/block/dasd_fba.c607
-rw-r--r--drivers/s390/block/dasd_fba.h73
-rw-r--r--drivers/s390/block/dasd_genhd.c185
-rw-r--r--drivers/s390/block/dasd_int.h576
-rw-r--r--drivers/s390/block/dasd_ioctl.c554
-rw-r--r--drivers/s390/block/dasd_proc.c319
-rw-r--r--drivers/s390/block/dcssblk.c775
-rw-r--r--drivers/s390/block/xpram.c539
22 files changed, 12553 insertions, 0 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
new file mode 100644
index 000000000000..dc1c89dbdb8f
--- /dev/null
+++ b/drivers/s390/block/Kconfig
@@ -0,0 +1,68 @@
1if ARCH_S390
2
3comment "S/390 block device drivers"
4 depends on ARCH_S390
5
6config BLK_DEV_XPRAM
7 tristate "XPRAM disk support"
8 depends on ARCH_S390
9 help
10 Select this option if you want to use your expanded storage on S/390
11 or zSeries as a disk. This is useful as a _fast_ swap device if you
12 want to access more than 2G of memory when running in 31 bit mode.
13 This option is also available as a module which will be called
14 xpram. If unsure, say "N".
15
16config DCSSBLK
17 tristate "DCSSBLK support"
18 help
19 Support for dcss block device
20
21config DASD
22 tristate "Support for DASD devices"
23 depends on CCW
24 help
25 Enable this option if you want to access DASDs directly utilizing
26 S/390s channel subsystem commands. This is necessary for running
27 natively on a single image or an LPAR.
28
29config DASD_PROFILE
30 bool "Profiling support for dasd devices"
31 depends on DASD
32 help
33 Enable this option if you want to see profiling information
34 in /proc/dasd/statistics.
35
36config DASD_ECKD
37 tristate "Support for ECKD Disks"
38 depends on DASD
39 help
40 ECKD devices are the most commonly used devices. You should enable
41 this option unless you are very sure to have no ECKD device.
42
43config DASD_FBA
44 tristate "Support for FBA Disks"
45 depends on DASD
46 help
47 Select this option to be able to access FBA devices. It is safe to
48 say "Y".
49
50config DASD_DIAG
51 tristate "Support for DIAG access to Disks"
52 depends on DASD && ARCH_S390X = 'n'
53 help
54 Select this option if you want to use Diagnose250 command to access
55 Disks under VM. If you are not running under VM or unsure what it is,
56 say "N".
57
58config DASD_CMB
59 tristate "Compatibility interface for DASD channel measurement blocks"
60 depends on DASD
61 help
62 This driver provides an additional interface to the channel measurement
facility, which is normally accessed through sysfs, with a set of
64 ioctl functions specific to the dasd driver.
65 This is only needed if you want to use applications written for
66 linux-2.4 dasd channel measurement facility interface.
67
68endif
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 000000000000..58c6780134f7
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,17 @@
#
# S/390 block devices
#

# The dasd discipline modules bundle the base driver object with the
# matching error recovery procedure (erp) objects.
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
dasd_diag_mod-objs := dasd_diag.o
# Core dasd module: device handling, ioctls, /proc, devmap, gendisk
# and generic error recovery.
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
			dasd_genhd.o dasd_erp.o

obj-$(CONFIG_DASD) += dasd_mod.o
obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 000000000000..b755bac6ccbc
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,2065 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * $Revision: 1.158 $
11 */
12
13#include <linux/config.h>
14#include <linux/kmod.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/ctype.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/buffer_head.h>
21
22#include <asm/ccwdev.h>
23#include <asm/ebcdic.h>
24#include <asm/idals.h>
25#include <asm/todclk.h>
26
27/* This is ugly... */
28#define PRINTK_HEADER "dasd:"
29
30#include "dasd_int.h"
31/*
32 * SECTION: Constant definitions to be used within this file
33 */
34#define DASD_CHANQ_MAX_SIZE 4
35
36/*
37 * SECTION: exported variables of dasd.c
38 */
39debug_info_t *dasd_debug_area;
40struct dasd_discipline *dasd_diag_discipline_pointer;
41
42MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
43MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
44 " Copyright 2000 IBM Corporation");
45MODULE_SUPPORTED_DEVICE("dasd");
46MODULE_PARM(dasd, "1-" __MODULE_STRING(256) "s");
47MODULE_LICENSE("GPL");
48
49/*
50 * SECTION: prototypes for static functions of dasd.c
51 */
52static int dasd_alloc_queue(struct dasd_device * device);
53static void dasd_setup_queue(struct dasd_device * device);
54static void dasd_free_queue(struct dasd_device * device);
55static void dasd_flush_request_queue(struct dasd_device *);
56static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
57static void dasd_flush_ccw_queue(struct dasd_device *, int);
58static void dasd_tasklet(struct dasd_device *);
59static void do_kick_device(void *data);
60
61/*
62 * SECTION: Operations on the device structure.
63 */
64static wait_queue_head_t dasd_init_waitq;
65
66/*
67 * Allocate memory for a new device structure.
68 */
69struct dasd_device *
70dasd_alloc_device(void)
71{
72 struct dasd_device *device;
73
74 device = kmalloc(sizeof (struct dasd_device), GFP_ATOMIC);
75 if (device == NULL)
76 return ERR_PTR(-ENOMEM);
77 memset(device, 0, sizeof (struct dasd_device));
78 /* open_count = 0 means device online but not in use */
79 atomic_set(&device->open_count, -1);
80
81 /* Get two pages for normal block device operations. */
82 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
83 if (device->ccw_mem == NULL) {
84 kfree(device);
85 return ERR_PTR(-ENOMEM);
86 }
87 /* Get one page for error recovery. */
88 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
89 if (device->erp_mem == NULL) {
90 free_pages((unsigned long) device->ccw_mem, 1);
91 kfree(device);
92 return ERR_PTR(-ENOMEM);
93 }
94
95 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
96 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
97 spin_lock_init(&device->mem_lock);
98 spin_lock_init(&device->request_queue_lock);
99 atomic_set (&device->tasklet_scheduled, 0);
100 tasklet_init(&device->tasklet,
101 (void (*)(unsigned long)) dasd_tasklet,
102 (unsigned long) device);
103 INIT_LIST_HEAD(&device->ccw_queue);
104 init_timer(&device->timer);
105 INIT_WORK(&device->kick_work, do_kick_device, device);
106 device->state = DASD_STATE_NEW;
107 device->target = DASD_STATE_NEW;
108
109 return device;
110}
111
112/*
113 * Free memory of a device structure.
114 */
115void
116dasd_free_device(struct dasd_device *device)
117{
118 if (device->private)
119 kfree(device->private);
120 free_page((unsigned long) device->erp_mem);
121 free_pages((unsigned long) device->ccw_mem, 1);
122 kfree(device);
123}
124
125/*
126 * Make a new device known to the system.
127 */
128static inline int
129dasd_state_new_to_known(struct dasd_device *device)
130{
131 int rc;
132
133 /*
134 * As long as the device is not in state DASD_STATE_NEW we want to
135 * keep the reference count > 0.
136 */
137 dasd_get_device(device);
138
139 rc = dasd_alloc_queue(device);
140 if (rc) {
141 dasd_put_device(device);
142 return rc;
143 }
144
145 device->state = DASD_STATE_KNOWN;
146 return 0;
147}
148
/*
 * Let the system forget about a device: undo dasd_state_new_to_known.
 * Drops the discipline binding, frees the request queue and releases
 * the extra device reference.
 */
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
	/* Forget the discipline information. */
	device->discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
}
164
/*
 * Move a device from DASD_STATE_KNOWN to DASD_STATE_BASIC: allocate
 * and register the gendisk and create the per-device debug area.
 */
static inline int
dasd_state_known_to_basic(struct dasd_device * device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	/* NOTE(review): the debug_register result is used unchecked here;
	 * confirm the debug_* helpers tolerate a failed registration. */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 0, 2,
					    8 * sizeof (long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_EMERG);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
188
/*
 * Drop a device from DASD_STATE_BASIC back to DASD_STATE_KNOWN:
 * free the gendisk, flush the ccw queue and unregister the
 * per-device debug area.
 */
static inline void
dasd_state_basic_to_known(struct dasd_device * device)
{
	dasd_gendisk_free(device);
	/* NOTE(review): second argument presumably means "terminate
	 * active i/o" (0 is passed in ready_to_basic) - confirm in
	 * dasd_flush_ccw_queue. */
	dasd_flush_ccw_queue(device, 1);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
}
204
205/*
206 * Do the initial analysis. The do_analysis function may return
207 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
208 * until the discipline decides to continue the startup sequence
209 * by calling the function dasd_change_state. The eckd disciplines
210 * uses this to start a ccw that detects the format. The completion
211 * interrupt for this detection ccw uses the kernel event daemon to
212 * trigger the call to dasd_change_state. All this is done in the
213 * discipline code, see dasd_eckd.c.
214 * After the analysis ccw is done (do_analysis returned 0 or error)
215 * the block device is setup. Either a fake disk is added to allow
216 * formatting or a proper device request queue is created.
217 */
218static inline int
219dasd_state_basic_to_ready(struct dasd_device * device)
220{
221 int rc;
222
223 rc = 0;
224 if (device->discipline->do_analysis != NULL)
225 rc = device->discipline->do_analysis(device);
226 if (rc)
227 return rc;
228 dasd_setup_queue(device);
229 device->state = DASD_STATE_READY;
230 if (dasd_scan_partitions(device) != 0)
231 device->state = DASD_STATE_BASIC;
232 return 0;
233}
234
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static inline void
dasd_state_ready_to_basic(struct dasd_device * device)
{
	dasd_flush_ccw_queue(device, 0);
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	/* Forget everything the analysis found out about the format. */
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
}
251
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static inline int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	/* Kick the tasklet so queued requests start flowing. */
	dasd_schedule_bh(device);
	return 0;
}
264
/*
 * Stop the requeueing of requests again.
 */
static inline void
dasd_state_online_to_ready(struct dasd_device * device)
{
	/* Inverse of dasd_state_ready_to_online. */
	device->state = DASD_STATE_READY;
}
273
274/*
275 * Device startup state changes.
276 */
277static inline int
278dasd_increase_state(struct dasd_device *device)
279{
280 int rc;
281
282 rc = 0;
283 if (device->state == DASD_STATE_NEW &&
284 device->target >= DASD_STATE_KNOWN)
285 rc = dasd_state_new_to_known(device);
286
287 if (!rc &&
288 device->state == DASD_STATE_KNOWN &&
289 device->target >= DASD_STATE_BASIC)
290 rc = dasd_state_known_to_basic(device);
291
292 if (!rc &&
293 device->state == DASD_STATE_BASIC &&
294 device->target >= DASD_STATE_READY)
295 rc = dasd_state_basic_to_ready(device);
296
297 if (!rc &&
298 device->state == DASD_STATE_READY &&
299 device->target >= DASD_STATE_ONLINE)
300 rc = dasd_state_ready_to_online(device);
301
302 return rc;
303}
304
/*
 * Device shutdown state changes: walk the device downwards from its
 * current state towards device->target. The ifs are deliberately
 * evaluated in sequence - each transition updates device->state, so
 * one call can cascade through several transitions.
 */
static inline int
dasd_decrease_state(struct dasd_device *device)
{
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		dasd_state_online_to_ready(device);

	if (device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		dasd_state_ready_to_basic(device);

	if (device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		dasd_state_basic_to_known(device);

	if (device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		dasd_state_known_to_new(device);

	/* The downward transitions cannot fail. */
	return 0;
}
329
/*
 * This is the main startup/shutdown routine: move device->state
 * towards device->target and wake waiters once the target state
 * is reached.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	/* -EAGAIN means a discipline continues the startup later;
	 * any other error makes the current state the new target. */
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
351
/*
 * Worker executed by the kernel event daemon on behalf of
 * dasd_kick_device: finish a pending startup/shutdown state change,
 * kick the bottom half and drop the reference the kicker took.
 */
static void
do_kick_device(void *data)
{
	struct dasd_device *device = (struct dasd_device *) data;

	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
368
/*
 * Kick starter: schedule do_kick_device via the kernel event daemon.
 * Takes a device reference that do_kick_device releases when done.
 */
void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
376
/*
 * Set the target state for a device and starts the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		/* Already at the new target? Then waiters are done. */
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}
394
395/*
396 * Enable devices with device numbers in [from..to].
397 */
398static inline int
399_wait_for_device(struct dasd_device *device)
400{
401 return (device->state == device->target);
402}
403
/*
 * Drive a device towards DASD_STATE_ONLINE and wait until the state
 * machine has reached its target state.
 */
void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
414
415/*
416 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
417 */
418#ifdef CONFIG_DASD_PROFILE
419
420struct dasd_profile_info_t dasd_global_profile;
421unsigned int dasd_profile_level = DASD_PROFILE_OFF;
422
/*
 * Increments counter in global and local profiling structures.
 * The value is mapped onto one of 32 logarithmic histogram buckets.
 * Wrapped in do { } while (0) so the macro expands safely as a
 * single statement (e.g. in an unbraced if/else body).
 */
#define dasd_profile_counter(value, counter, device) \
do { \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
} while (0)
433
434/*
435 * Add profiling information for cqr before execution.
436 */
437static inline void
438dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
439 struct request *req)
440{
441 struct list_head *l;
442 unsigned int counter;
443
444 if (dasd_profile_level != DASD_PROFILE_ON)
445 return;
446
447 /* count the length of the chanq for statistics */
448 counter = 0;
449 list_for_each(l, &device->ccw_queue)
450 if (++counter >= 31)
451 break;
452 dasd_global_profile.dasd_io_nr_req[counter]++;
453 device->profile.dasd_io_nr_req[counter]++;
454}
455
/*
 * Add profiling information for cqr after execution: account the
 * request in the global and per-device histograms. Times are derived
 * from the clock stamps taken during request processing (the >> 12
 * converts clock units to the microseconds noted below).
 */
static inline void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	/* Skip requests with incomplete clock stamps or no data. */
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* build->start, start->stop, stop->end and total durations. */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	/* A zero request count means the structure needs (re)init. */
	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof (struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof (struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
501#else
502#define dasd_profile_start(device, cqr, req) do {} while (0)
503#define dasd_profile_end(device, cqr, req) do {} while (0)
504#endif /* CONFIG_DASD_PROFILE */
505
506/*
507 * Allocate memory for a channel program with 'cplength' channel
508 * command words and 'datasize' additional space. There are two
509 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
510 * memory and 2) dasd_smalloc_request uses the static ccw memory
511 * that gets allocated for each device.
512 */
513struct dasd_ccw_req *
514dasd_kmalloc_request(char *magic, int cplength, int datasize,
515 struct dasd_device * device)
516{
517 struct dasd_ccw_req *cqr;
518
519 /* Sanity checks */
520 if ( magic == NULL || datasize > PAGE_SIZE ||
521 (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
522 BUG();
523
524 cqr = kmalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
525 if (cqr == NULL)
526 return ERR_PTR(-ENOMEM);
527 memset(cqr, 0, sizeof(struct dasd_ccw_req));
528 cqr->cpaddr = NULL;
529 if (cplength > 0) {
530 cqr->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
531 GFP_ATOMIC | GFP_DMA);
532 if (cqr->cpaddr == NULL) {
533 kfree(cqr);
534 return ERR_PTR(-ENOMEM);
535 }
536 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
537 }
538 cqr->data = NULL;
539 if (datasize > 0) {
540 cqr->data = kmalloc(datasize, GFP_ATOMIC | GFP_DMA);
541 if (cqr->data == NULL) {
542 if (cqr->cpaddr != NULL)
543 kfree(cqr->cpaddr);
544 kfree(cqr);
545 return ERR_PTR(-ENOMEM);
546 }
547 memset(cqr->data, 0, datasize);
548 }
549 strncpy((char *) &cqr->magic, magic, 4);
550 ASCEBC((char *) &cqr->magic, 4);
551 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
552 dasd_get_device(device);
553 return cqr;
554}
555
/*
 * Static-pool variant of dasd_kmalloc_request: carves the request
 * header, channel program and data area out of the per-device ccw
 * chunk pool in one contiguous piece.
 */
struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	/* Round the header up to a multiple of 8 bytes so that the
	 * channel program that follows it is 8-byte aligned. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	/* Channel program and data area live directly behind the
	 * (aligned) request structure. */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	/* Store the discipline magic in ebcdic (see dasd_check_cqr). */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
600
601/*
602 * Free memory of a channel program. This function needs to free all the
603 * idal lists that might have been created by dasd_set_cda and the
604 * struct dasd_ccw_req itself.
605 */
606void
607dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
608{
609#ifdef CONFIG_ARCH_S390X
610 struct ccw1 *ccw;
611
612 /* Clear any idals used for the request. */
613 ccw = cqr->cpaddr;
614 do {
615 clear_normalized_cda(ccw);
616 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
617#endif
618 if (cqr->cpaddr != NULL)
619 kfree(cqr->cpaddr);
620 if (cqr->data != NULL)
621 kfree(cqr->data);
622 kfree(cqr);
623 dasd_put_device(device);
624}
625
/*
 * Return a request allocated by dasd_smalloc_request to the
 * per-device chunk pool and drop the device reference it held.
 */
void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
636
/*
 * Check discipline magic in cqr: a request may only be handled by
 * the discipline that built it.
 * Returns 0 on success, -EINVAL on NULL cqr or magic mismatch.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	/* cqr->magic is stored in ebcdic, matching ebcname. */
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
658
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 * Returns the rc of the last ccw_device_clear attempt.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	/* Try at most 5 times; a successful clear takes the request
	 * out of DASD_CQR_IN_IO and thereby ends the loop. */
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			if (cqr->retries > 0) {
				cqr->retries--;
				cqr->status = DASD_CQR_CLEAR;
			} else
				cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = get_clock();
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}
717
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 * Returns 0 if the request was started, -EIO if no retries are left,
 * otherwise the rc of ccw_device_start.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	/* Stamp the request and burn one retry before starting it. */
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}
785
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the tasklet pick up whatever is ready to run now. */
	dasd_schedule_bh(device);
}
807
/*
 * Setup timeout for a device in jiffies.
 * expires == 0 cancels a pending timeout; otherwise a pending timer
 * is re-armed, or a fresh one is set up and started.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		/* mod_timer returns nonzero if the timer was still
		 * pending, in which case it is fully re-armed. */
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	/* Timer was not (or no longer) pending - arm from scratch. */
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}
828
829/*
830 * Clear timeout for a device.
831 */
832void
833dasd_clear_timer(struct dasd_device *device)
834{
835 if (timer_pending(&device->timer))
836 del_timer(&device->timer);
837}
838
/*
 * Called from dasd_int_handler when the i/o subsystem reports -EIO
 * for a request: validate the request and put it back on the queue
 * so the tasklet retries it.
 */
static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	/* NOTE(review): if dasd_device_from_cdev takes a reference, it
	 * appears to be dropped unbalanced on the early-return path
	 * below - confirm against its definition. */
	if (device == NULL ||
	    device != dasd_device_from_cdev(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
870
871static void
872dasd_handle_state_change_pending(struct dasd_device *device)
873{
874 struct dasd_ccw_req *cqr;
875 struct list_head *l, *n;
876
877 device->stopped &= ~DASD_STOPPED_PENDING;
878
879 /* restart all 'running' IO on queue */
880 list_for_each_safe(l, n, &device->ccw_queue) {
881 cqr = list_entry(l, struct dasd_ccw_req, list);
882 if (cqr->status == DASD_CQR_IN_IO) {
883 cqr->status = DASD_CQR_QUEUED;
884 }
885 }
886 dasd_clear_timer(device);
887 dasd_schedule_bh(device);
888}
889
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 *
 * Runs in interrupt context; 'intparm' carries the dasd_ccw_req
 * pointer that was passed when the request was started, and 'irb'
 * holds the interrupt response block (or an ERR_PTR value when the
 * I/O was killed or timed out by the common I/O layer).
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	/* An ERR_PTR irb means the I/O did not complete normally. */
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* The ebcdic magic guards against stale request pointers. */
	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		/* The clear completed; requeue the request. */
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
		era = dasd_era_fatal; /* don't recover this request */
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		/* Keep a copy of the irb for later error recovery. */
		memcpy(&cqr->irb, irb, sizeof (struct irb));
#ifdef ERP_DEBUG
		/* dump sense data */
		dasd_log_sense(cqr, irb);
#endif
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	/* Arm or disarm the request timeout, then let the bh finish up. */
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}
1032
1033/*
1034 * posts the buffer_cache about a finalized request
1035 */
1036static inline void
1037dasd_end_request(struct request *req, int uptodate)
1038{
1039 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1040 BUG();
1041 add_disk_randomness(req->rq_disk);
1042 end_that_request_last(req);
1043}
1044
1045/*
1046 * Process finished error recovery ccw.
1047 */
1048static inline void
1049__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
1050{
1051 dasd_erp_fn_t erp_fn;
1052
1053 if (cqr->status == DASD_CQR_DONE)
1054 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1055 else
1056 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1057 erp_fn = device->discipline->erp_postaction(cqr);
1058 erp_fn(cqr);
1059}
1060
/*
 * Process ccw request queue.
 *
 * Walks the ccw queue from the head and handles every request that
 * has reached a final or error status. Erroneous requests are handed
 * to error recovery, which may modify the queue (hence the restart);
 * finished requests are moved to 'final_queue' so their callbacks can
 * be invoked after the device lock has been dropped.
 * Called with the ccw device lock held.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				/* Halted I/O is not recovered. */
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				/* With sense data available the
				 * discipline selects the ERP action;
				 * otherwise take the default action. */
				if (cqr->irb.esw.esw0.erw.cons) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}
1107
1108static void
1109dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1110{
1111 struct request *req;
1112 struct dasd_device *device;
1113 int status;
1114
1115 req = (struct request *) data;
1116 device = cqr->device;
1117 dasd_profile_end(device, cqr, req);
1118 status = cqr->device->discipline->free_cp(cqr,req);
1119 spin_lock_irq(&device->request_queue_lock);
1120 dasd_end_request(req, status);
1121 spin_unlock_irq(&device->request_queue_lock);
1122}
1123
1124
/*
 * Fetch requests from the block device queue.
 *
 * Moves requests from the block layer queue to the device ccw queue
 * (building a ccw request for each via the discipline) until the ccw
 * queue holds DASD_CHANQ_MAX_SIZE queued requests or the block queue
 * is exhausted/plugged. Called with the request queue lock and the
 * ccw device lock held.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
	request_queue_t *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Count the requests already queued on the ccw queue. */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
		nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);
		/* Fail writes directed at a read-only device. */
		if (test_bit(DASD_FLAG_RO, &device->flags) &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		/* Device stopped with fail-fast policy: error out. */
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			/* -ENOMEM is transient - retry on the next run. */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
1195
1196/*
1197 * Take a look at the first request on the ccw queue and check
1198 * if it reached its expire time. If so, terminate the IO.
1199 */
1200static inline void
1201__dasd_check_expire(struct dasd_device * device)
1202{
1203 struct dasd_ccw_req *cqr;
1204
1205 if (list_empty(&device->ccw_queue))
1206 return;
1207 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1208 if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
1209 if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
1210 if (device->discipline->term_IO(cqr) != 0)
1211 /* Hmpf, try again in 1/10 sec */
1212 dasd_set_timer(device, 10);
1213 }
1214 }
1215}
1216
1217/*
1218 * Take a look at the first request on the ccw queue and check
1219 * if it needs to be started.
1220 */
1221static inline void
1222__dasd_start_head(struct dasd_device * device)
1223{
1224 struct dasd_ccw_req *cqr;
1225 int rc;
1226
1227 if (list_empty(&device->ccw_queue))
1228 return;
1229 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1230 if ((cqr->status == DASD_CQR_QUEUED) &&
1231 (!device->stopped)) {
1232 /* try to start the first I/O that can be started */
1233 rc = device->discipline->start_IO(cqr);
1234 if (rc == 0)
1235 dasd_set_timer(device, cqr->expires);
1236 else if (rc == -EACCES) {
1237 dasd_schedule_bh(device);
1238 } else
1239 /* Hmpf, try again in 1/2 sec */
1240 dasd_set_timer(device, 50);
1241 }
1242}
1243
1244/*
1245 * Remove requests from the ccw queue.
1246 */
1247static void
1248dasd_flush_ccw_queue(struct dasd_device * device, int all)
1249{
1250 struct list_head flush_queue;
1251 struct list_head *l, *n;
1252 struct dasd_ccw_req *cqr;
1253
1254 INIT_LIST_HEAD(&flush_queue);
1255 spin_lock_irq(get_ccwdev_lock(device->cdev));
1256 list_for_each_safe(l, n, &device->ccw_queue) {
1257 cqr = list_entry(l, struct dasd_ccw_req, list);
1258 /* Flush all request or only block device requests? */
1259 if (all == 0 && cqr->callback == dasd_end_request_cb)
1260 continue;
1261 if (cqr->status == DASD_CQR_IN_IO)
1262 device->discipline->term_IO(cqr);
1263 if (cqr->status != DASD_CQR_DONE ||
1264 cqr->status != DASD_CQR_FAILED) {
1265 cqr->status = DASD_CQR_FAILED;
1266 cqr->stopclk = get_clock();
1267 }
1268 /* Process finished ERP request. */
1269 if (cqr->refers) {
1270 __dasd_process_erp(device, cqr);
1271 continue;
1272 }
1273 /* Rechain request on device request queue */
1274 cqr->endclk = get_clock();
1275 list_move_tail(&cqr->list, &flush_queue);
1276 }
1277 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1278 /* Now call the callback function of flushed requests */
1279 list_for_each_safe(l, n, &flush_queue) {
1280 cqr = list_entry(l, struct dasd_ccw_req, list);
1281 if (cqr->callback != NULL)
1282 (cqr->callback)(cqr, cqr->callback_data);
1283 }
1284}
1285
/*
 * Acquire the device lock and process queues for the device.
 *
 * Tasklet body: check the head request for expiry, collect requests
 * with final status, run their callbacks with the ccw device lock
 * dropped, then refill the ccw queue from the block layer and start
 * the head request. Drops the device reference that was taken when
 * the tasklet was scheduled in dasd_schedule_bh.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* From here on the tasklet may be scheduled again. */
	atomic_set (&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	/* Lock order: request queue lock before ccw device lock. */
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	/* Drop the reference taken in dasd_schedule_bh. */
	dasd_put_device(device);
}
1321
1322/*
1323 * Schedules a call to dasd_tasklet over the device tasklet.
1324 */
1325void
1326dasd_schedule_bh(struct dasd_device * device)
1327{
1328 /* Protect against rescheduling. */
1329 if (atomic_compare_and_swap (0, 1, &device->tasklet_scheduled))
1330 return;
1331 dasd_get_device(device);
1332 tasklet_hi_schedule(&device->tasklet);
1333}
1334
1335/*
1336 * Queue a request to the head of the ccw_queue. Start the I/O if
1337 * possible.
1338 */
1339void
1340dasd_add_request_head(struct dasd_ccw_req *req)
1341{
1342 struct dasd_device *device;
1343 unsigned long flags;
1344
1345 device = req->device;
1346 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1347 req->status = DASD_CQR_QUEUED;
1348 req->device = device;
1349 list_add(&req->list, &device->ccw_queue);
1350 /* let the bh start the request to keep them in order */
1351 dasd_schedule_bh(device);
1352 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1353}
1354
1355/*
1356 * Queue a request to the tail of the ccw_queue. Start the I/O if
1357 * possible.
1358 */
1359void
1360dasd_add_request_tail(struct dasd_ccw_req *req)
1361{
1362 struct dasd_device *device;
1363 unsigned long flags;
1364
1365 device = req->device;
1366 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1367 req->status = DASD_CQR_QUEUED;
1368 req->device = device;
1369 list_add_tail(&req->list, &device->ccw_queue);
1370 /* let the bh start the request to keep them in order */
1371 dasd_schedule_bh(device);
1372 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1373}
1374
1375/*
1376 * Wakeup callback.
1377 */
1378static void
1379dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1380{
1381 wake_up((wait_queue_head_t *) data);
1382}
1383
1384static inline int
1385_wait_for_wakeup(struct dasd_ccw_req *cqr)
1386{
1387 struct dasd_device *device;
1388 int rc;
1389
1390 device = cqr->device;
1391 spin_lock_irq(get_ccwdev_lock(device->cdev));
1392 rc = cqr->status == DASD_CQR_DONE || cqr->status == DASD_CQR_FAILED;
1393 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1394 return rc;
1395}
1396
/*
 * Attempts to start a special ccw queue and waits for its completion.
 *
 * Queues 'cqr' at the tail of the device ccw queue and sleeps
 * uninterruptibly until the request reaches a final status. Uses an
 * on-stack wait queue that is woken from dasd_wakeup_cb.
 * Returns 0 on success, -EIO if the request failed.
 * Must be called from process context.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
1427
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 *
 * Like dasd_sleep_on, but the wait may be interrupted by a signal.
 * On signal delivery the request is terminated and dequeued if it
 * is currently in I/O; otherwise (e.g. still queued or termination
 * failed) the wait is resumed.
 * Returns 0 on success, -EIO if the request failed, -ERESTARTSYS
 * if a signal aborted the request.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request status is either done or failed. */
			rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
			break;
		}
		/* A signal arrived: try to terminate and dequeue the
		 * request; if that is not possible, keep waiting. */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		if (cqr->status == DASD_CQR_IN_IO &&
		    device->discipline->term_IO(cqr) == 0) {
			list_del(&cqr->list);
			finished = 1;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}
1470
1471/*
1472 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1473 * for eckd devices) the currently running request has to be terminated
1474 * and be put back to status queued, before the special request is added
1475 * to the head of the queue. Then the special request is waited on normally.
1476 */
1477static inline int
1478_dasd_term_running_cqr(struct dasd_device *device)
1479{
1480 struct dasd_ccw_req *cqr;
1481 int rc;
1482
1483 if (list_empty(&device->ccw_queue))
1484 return 0;
1485 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
1486 rc = device->discipline->term_IO(cqr);
1487 if (rc == 0) {
1488 /* termination successful */
1489 cqr->status = DASD_CQR_QUEUED;
1490 cqr->startclk = cqr->stopclk = 0;
1491 cqr->starttime = 0;
1492 }
1493 return rc;
1494}
1495
/*
 * Terminate the currently running request (see _dasd_term_running_cqr),
 * queue 'cqr' at the head of the ccw queue and wait for its completion.
 * Used for requests that must run immediately, e.g. steal lock for
 * eckd devices.
 * Returns 0 on success, -EIO if the request failed, or the error
 * from terminating the running request.
 */
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head (&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	/* Add to the head so it runs before the terminated request. */
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
1528
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		/* NOTE(review): stopclk and rc = 1 are set even when
		 * term_IO failed above - confirm this is intended. */
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Let the bh finalize the cancelled request. */
	dasd_schedule_bh(device);
	return rc;
}
1573
1574/*
1575 * SECTION: Block device operations (request queue, partitions, open, release).
1576 */
1577
1578/*
1579 * Dasd request queue function. Called from ll_rw_blk.c
1580 */
1581static void
1582do_dasd_request(request_queue_t * queue)
1583{
1584 struct dasd_device *device;
1585
1586 device = (struct dasd_device *) queue->queuedata;
1587 spin_lock(get_ccwdev_lock(device->cdev));
1588 /* Get new request from the block device request queue */
1589 __dasd_process_blk_queue(device);
1590 /* Now check if the head of the ccw queue needs to be started. */
1591 __dasd_start_head(device);
1592 spin_unlock(get_ccwdev_lock(device->cdev));
1593}
1594
1595/*
1596 * Allocate and initialize request queue and default I/O scheduler.
1597 */
1598static int
1599dasd_alloc_queue(struct dasd_device * device)
1600{
1601 int rc;
1602
1603 device->request_queue = blk_init_queue(do_dasd_request,
1604 &device->request_queue_lock);
1605 if (device->request_queue == NULL)
1606 return -ENOMEM;
1607
1608 device->request_queue->queuedata = device;
1609
1610 elevator_exit(device->request_queue->elevator);
1611 rc = elevator_init(device->request_queue, "deadline");
1612 if (rc) {
1613 blk_cleanup_queue(device->request_queue);
1614 return rc;
1615 }
1616 return 0;
1617}
1618
1619/*
1620 * Allocate and initialize request queue.
1621 */
1622static void
1623dasd_setup_queue(struct dasd_device * device)
1624{
1625 int max;
1626
1627 blk_queue_hardsect_size(device->request_queue, device->bp_block);
1628 max = device->discipline->max_blocks << device->s2b_shift;
1629 blk_queue_max_sectors(device->request_queue, max);
1630 blk_queue_max_phys_segments(device->request_queue, -1L);
1631 blk_queue_max_hw_segments(device->request_queue, -1L);
1632 blk_queue_max_segment_size(device->request_queue, -1L);
1633 blk_queue_segment_boundary(device->request_queue, -1L);
1634}
1635
1636/*
1637 * Deactivate and free request queue.
1638 */
1639static void
1640dasd_free_queue(struct dasd_device * device)
1641{
1642 if (device->request_queue) {
1643 blk_cleanup_queue(device->request_queue);
1644 device->request_queue = NULL;
1645 }
1646}
1647
1648/*
1649 * Flush request on the request queue.
1650 */
1651static void
1652dasd_flush_request_queue(struct dasd_device * device)
1653{
1654 struct request *req;
1655
1656 if (!device->request_queue)
1657 return;
1658
1659 spin_lock_irq(&device->request_queue_lock);
1660 while (!list_empty(&device->request_queue->queue_head)) {
1661 req = elv_next_request(device->request_queue);
1662 if (req == NULL)
1663 break;
1664 dasd_end_request(req, 0);
1665 blkdev_dequeue_request(req);
1666 }
1667 spin_unlock_irq(&device->request_queue_lock);
1668}
1669
/*
 * Block device open: count the opener and pin the discipline module.
 * Fails if the device is being set offline, the driver runs in
 * probeonly mode, or the device has not been recognized yet.
 * Returns 0 on success or a negative error code.
 */
static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	/* Keep the discipline module loaded while the device is open. */
	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state < DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}
1710
1711static int
1712dasd_release(struct inode *inp, struct file *filp)
1713{
1714 struct gendisk *disk = inp->i_bdev->bd_disk;
1715 struct dasd_device *device = disk->private_data;
1716
1717 atomic_dec(&device->open_count);
1718 module_put(device->discipline->owner);
1719 return 0;
1720}
1721
/*
 * Block device operations shared by all dasd block devices;
 * attached to the gendisk when the device is set up.
 */
struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
};
1729
1730
/*
 * Tear down the dasd core: unregister the proc, ioctl, gendisk and
 * devmap facilities, remove the devfs directory and free the debug
 * area. Also used to unwind a partially completed dasd_init, so
 * the debug area is only freed if it was actually registered.
 */
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_ioctl_exit();
	dasd_gendisk_exit();
	dasd_devmap_exit();
	devfs_remove("dasd");
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
1746
1747/*
1748 * SECTION: common functions for ccw_driver use
1749 */
1750
1751/* initial attempt at a probe function. this can be simplified once
1752 * the other detection code is gone */
1753int
1754dasd_generic_probe (struct ccw_device *cdev,
1755 struct dasd_discipline *discipline)
1756{
1757 int ret;
1758
1759 ret = dasd_add_sysfs_files(cdev);
1760 if (ret) {
1761 printk(KERN_WARNING
1762 "dasd_generic_probe: could not add sysfs entries "
1763 "for %s\n", cdev->dev.bus_id);
1764 }
1765
1766 cdev->handler = &dasd_int_handler;
1767
1768 return ret;
1769}
1770
/* this will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload */
void
dasd_generic_remove (struct ccw_device *cdev)
{
	struct dasd_device *device;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);
}
1796
/* activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs */
int
dasd_generic_set_online (struct ccw_device *cdev,
			 struct dasd_discipline *discipline)

{
	struct dasd_device *device;
	int rc;

	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	/* A device configured for DIAG access needs the DIAG discipline,
	 * which is only available if the dasd_diag module is loaded. */
	if (test_bit(DASD_FLAG_USE_DIAG, &device->flags)) {
		if (!dasd_diag_discipline_pointer) {
			printk (KERN_WARNING
				"dasd_generic couldn't online device %s "
				"- discipline DIAG not available\n",
				cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	device->discipline = discipline;

	/* Let the discipline examine the device. */
	rc = discipline->check_device(device);
	if (rc) {
		printk (KERN_WARNING
			"dasd_generic couldn't online device %s "
			"with discipline %s rc=%i\n",
			cdev->dev.bus_id, discipline->name, rc);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		/* Onlining did not get past device recognition. */
		printk (KERN_WARNING
			"dasd_generic discipline not found for %s\n",
			cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
				cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
1855
/*
 * Deactivate a device that is going offline. The device is only
 * taken down if nobody holds it open (apart from the internal
 * blkdev_get done for partition scanning).
 * Returns 0 on success, -EBUSY if the device is still in use.
 */
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	if (atomic_read(&device->open_count) > max_count) {
		printk (KERN_WARNING "Can't offline dasd device with open"
			" count = %i.\n",
			atomic_read(&device->open_count));
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	dasd_delete_device(device);

	return 0;
}
1891
/*
 * Notification handler called by the common I/O layer on ccw device
 * state changes (device gone, no path, operational again).
 * Returns 1 if the event was handled and the device should be kept,
 * 0 otherwise.
 */
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			/* Fail-fast policy: error out running requests
			 * and fail new ones immediately. */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
			dasd_schedule_bh(device);
		} else {
			/* Wait policy: requeue running requests and
			 * stop the device until a path returns. */
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}
1939
/*
 * Automatically online either all dasd devices (dasd_autodetect) or
 * all devices specified with dasd= parameters.
 *
 * Walks every ccw device currently bound to the given discipline
 * driver; each device is set online if autodetection is enabled or
 * its bus id matched a dasd= parameter.
 */
void
dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
{
	struct device_driver *drv;
	struct device *d, *dev;
	struct ccw_device *cdev;

	drv = get_driver(&dasd_discipline_driver->driver);
	/* Hold the bus rwsem while walking the driver's device list. */
	down_read(&drv->bus->subsys.rwsem);
	dev = NULL;
	list_for_each_entry(d, &drv->devices, driver_list) {
		dev = get_device(d);
		if (!dev)
			continue;
		cdev = to_ccwdev(dev);
		if (dasd_autodetect || dasd_busid_known(cdev->dev.bus_id) == 0)
			ccw_device_set_online(cdev);
		put_device(dev);
	}
	up_read(&drv->bus->subsys.rwsem);
	put_driver(drv);
}
1966
/*
 * Module/boot initialization of the dasd core.
 *
 * Sets up the debug area, the devfs directory and the devmap,
 * gendisk, ioctl and proc subsystems and parses the dasd= kernel
 * or module parameters. Any failure is unwound via dasd_exit.
 * Returns 0 on success or a negative error code.
 */
static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 0, 2, 8 * sizeof (long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_EMERG);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	/* Filled in by the dasd_diag module when it is loaded. */
	dasd_diag_discipline_pointer = NULL;

	rc = devfs_mk_dir("dasd");
	if (rc)
		goto failed;
	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_ioctl_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}
2014
module_init(dasd_init);
module_exit(dasd_exit);

/* Shared debugging facilities used by the discipline modules. */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

/* Core request/queue handling API for the discipline modules. */
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

/* ccw_driver helpers for probe/remove/online/offline/notify. */
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
2048/*
2049 * Overrides for Emacs so that we follow Linus's tabbing style.
2050 * Emacs will notice this stuff at the end of the file and automatically
2051 * adjust the settings for this buffer only. This must remain at the end
2052 * of the file.
2053 * ---------------------------------------------------------------------------
2054 * Local variables:
2055 * c-indent-level: 4
2056 * c-brace-imaginary-offset: 0
2057 * c-brace-offset: -4
2058 * c-argdecl-indent: 4
2059 * c-label-offset: -4
2060 * c-continued-statement-offset: 4
2061 * c-continued-brace-offset: 0
2062 * indent-tabs-mode: 1
2063 * tab-width: 8
2064 * End:
2065 */
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
new file mode 100644
index 000000000000..84565c8f584e
--- /dev/null
+++ b/drivers/s390/block/dasd_3370_erp.c
@@ -0,0 +1,104 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3370_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.9 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(3370)"
11
12#include "dasd_int.h"
13
14
15/*
16 * DASD_3370_ERP_EXAMINE
17 *
18 * DESCRIPTION
19 * Checks only for fatal/no/recover error.
20 * A detailed examination of the sense data is done later outside
21 * the interrupt handler.
22 *
23 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
24 * 'Chapter 7. 3370 Sense Data'.
25 *
26 * RETURN VALUES
27 * dasd_era_none no error
28 * dasd_era_fatal for all fatal (unrecoverable errors)
29 * dasd_era_recover for all others.
30 */
31dasd_era_t
32dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
33{
34 char *sense = irb->ecw;
35
36 /* check for successful execution first */
37 if (irb->scsw.cstat == 0x00 &&
38 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
39 return dasd_era_none;
40 if (sense[0] & 0x80) { /* CMD reject */
41 return dasd_era_fatal;
42 }
43 if (sense[0] & 0x40) { /* Drive offline */
44 return dasd_era_recover;
45 }
46 if (sense[0] & 0x20) { /* Bus out parity */
47 return dasd_era_recover;
48 }
49 if (sense[0] & 0x10) { /* equipment check */
50 if (sense[1] & 0x80) {
51 return dasd_era_fatal;
52 }
53 return dasd_era_recover;
54 }
55 if (sense[0] & 0x08) { /* data check */
56 if (sense[1] & 0x80) {
57 return dasd_era_fatal;
58 }
59 return dasd_era_recover;
60 }
61 if (sense[0] & 0x04) { /* overrun */
62 if (sense[1] & 0x80) {
63 return dasd_era_fatal;
64 }
65 return dasd_era_recover;
66 }
67 if (sense[1] & 0x40) { /* invalid blocksize */
68 return dasd_era_fatal;
69 }
70 if (sense[1] & 0x04) { /* file protected */
71 return dasd_era_recover;
72 }
73 if (sense[1] & 0x01) { /* operation incomplete */
74 return dasd_era_recover;
75 }
76 if (sense[2] & 0x80) { /* check data erroor */
77 return dasd_era_recover;
78 }
79 if (sense[2] & 0x10) { /* Env. data present */
80 return dasd_era_recover;
81 }
82 /* examine the 24 byte sense data */
83 return dasd_era_recover;
84
85} /* END dasd_3370_erp_examine */
86
87/*
88 * Overrides for Emacs so that we follow Linus's tabbing style.
89 * Emacs will notice this stuff at the end of the file and automatically
90 * adjust the settings for this buffer only. This must remain at the end
91 * of the file.
92 * ---------------------------------------------------------------------------
93 * Local variables:
94 * c-indent-level: 4
95 * c-brace-imaginary-offset: 0
96 * c-brace-offset: -4
97 * c-argdecl-indent: 4
98 * c-label-offset: -4
99 * c-continued-statement-offset: 4
100 * c-continued-brace-offset: 0
101 * indent-tabs-mode: 1
102 * tab-width: 8
103 * End:
104 */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
new file mode 100644
index 000000000000..c143ecb53d9d
--- /dev/null
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -0,0 +1,2742 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3990_erp.c
3 * Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
4 * Holger Smolinski <Holger.Smolinski@de.ibm.com>
5 * Bugreports.to..: <Linux390@de.ibm.com>
6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
7 *
8 * $Revision: 1.36 $
9 */
10
11#include <linux/timer.h>
12#include <linux/slab.h>
13#include <asm/idals.h>
14#include <asm/todclk.h>
15
16#define PRINTK_HEADER "dasd_erp(3990): "
17
18#include "dasd_int.h"
19#include "dasd_eckd.h"
20
21
/*
 * Payload of the Diagnostic Control (DCTL) channel command as built by
 * dasd_3990_erp_DCTL(); layout is defined by the hardware, hence packed.
 */
struct DCTL_data {
	unsigned char subcommand;  /* e.g Inhibit Write, Enable Write,... */
	unsigned char modifier;	   /* Subcommand modifier */
	unsigned short res;	   /* reserved */
} __attribute__ ((packed));
27
28/*
29 *****************************************************************************
30 * SECTION ERP EXAMINATION
31 *****************************************************************************
32 */
33
34/*
35 * DASD_3990_ERP_EXAMINE_24
36 *
37 * DESCRIPTION
38 * Checks only for fatal (unrecoverable) error.
39 * A detailed examination of the sense data is done later outside
40 * the interrupt handler.
41 *
42 * Each bit configuration leading to an action code 2 (Exit with
43 * programming error or unusual condition indication)
44 * are handled as fatal error´s.
45 *
46 * All other configurations are handled as recoverable errors.
47 *
48 * RETURN VALUES
49 * dasd_era_fatal for all fatal (unrecoverable errors)
50 * dasd_era_recover for all others.
51 */
static dasd_era_t
dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
{

	struct dasd_device *device = cqr->device;

	/* check for 'Command Reject' */
	/* a command reject without environmental data present is an
	 * action-code-2 condition and cannot be retried */
	if ((sense[0] & SNS0_CMD_REJECT) &&
	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {

		DEV_MESSAGE(KERN_ERR, device, "%s",
			    "EXAMINE 24: Command Reject detected - "
			    "fatal error");

		return dasd_era_fatal;
	}

	/* check for 'Invalid Track Format' */
	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
	    (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {

		DEV_MESSAGE(KERN_ERR, device, "%s",
			    "EXAMINE 24: Invalid Track Format detected "
			    "- fatal error");

		return dasd_era_fatal;
	}

	/* check for 'No Record Found' */
	if (sense[1] & SNS1_NO_REC_FOUND) {

		/* FIXME: fatal error ?!? */
		/* NOTE(review): only the message text depends on the
		 * device state here; the return value is always fatal. */
		DEV_MESSAGE(KERN_ERR, device,
			    "EXAMINE 24: No Record Found detected %s",
			    device->state <= DASD_STATE_BASIC ?
			    " " : "- fatal error");

		return dasd_era_fatal;
	}

	/* return recoverable for all others */
	return dasd_era_recover;
} /* END dasd_3990_erp_examine_24 */
95
96/*
97 * DASD_3990_ERP_EXAMINE_32
98 *
99 * DESCRIPTION
100 * Checks only for fatal/no/recoverable error.
101 * A detailed examination of the sense data is done later outside
102 * the interrupt handler.
103 *
104 * RETURN VALUES
105 * dasd_era_none no error
106 * dasd_era_fatal for all fatal (unrecoverable errors)
107 * dasd_era_recover for recoverable others.
108 */
109static dasd_era_t
110dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
111{
112
113 struct dasd_device *device = cqr->device;
114
115 switch (sense[25]) {
116 case 0x00:
117 return dasd_era_none;
118
119 case 0x01:
120 DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
121
122 return dasd_era_fatal;
123
124 default:
125
126 return dasd_era_recover;
127 }
128
129} /* end dasd_3990_erp_examine_32 */
130
131/*
132 * DASD_3990_ERP_EXAMINE
133 *
134 * DESCRIPTION
135 * Checks only for fatal/no/recover error.
136 * A detailed examination of the sense data is done later outside
137 * the interrupt handler.
138 *
139 * The logic is based on the 'IBM 3990 Storage Control Reference' manual
140 * 'Chapter 7. Error Recovery Procedures'.
141 *
142 * RETURN VALUES
143 * dasd_era_none no error
144 * dasd_era_fatal for all fatal (unrecoverable errors)
145 * dasd_era_recover for all others.
146 */
147dasd_era_t
148dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
149{
150
151 char *sense = irb->ecw;
152 dasd_era_t era = dasd_era_recover;
153 struct dasd_device *device = cqr->device;
154
155 /* check for successful execution first */
156 if (irb->scsw.cstat == 0x00 &&
157 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
158 return dasd_era_none;
159
160 /* distinguish between 24 and 32 byte sense data */
161 if (sense[27] & DASD_SENSE_BIT_0) {
162
163 era = dasd_3990_erp_examine_24(cqr, sense);
164
165 } else {
166
167 era = dasd_3990_erp_examine_32(cqr, sense);
168
169 }
170
171 /* log the erp chain if fatal error occurred */
172 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
173 dasd_log_sense(cqr, irb);
174 dasd_log_ccw(cqr, 0, irb->scsw.cpa);
175 }
176
177 return era;
178
179} /* END dasd_3990_erp_examine */
180
181/*
182 *****************************************************************************
183 * SECTION ERP HANDLING
184 *****************************************************************************
185 */
186/*
187 *****************************************************************************
188 * 24 and 32 byte sense ERP functions
189 *****************************************************************************
190 */
191
192/*
193 * DASD_3990_ERP_CLEANUP
194 *
195 * DESCRIPTION
196 * Removes the already build but not necessary ERP request and sets
197 * the status of the original cqr / erp to the given (final) status
198 *
199 * PARAMETER
200 * erp request to be blocked
201 * final_status either DASD_CQR_DONE or DASD_CQR_FAILED
202 *
203 * RETURN VALUES
204 * cqr original cqr
205 */
206static struct dasd_ccw_req *
207dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
208{
209 struct dasd_ccw_req *cqr = erp->refers;
210
211 dasd_free_erp_request(erp, erp->device);
212 cqr->status = final_status;
213 return cqr;
214
215} /* end dasd_3990_erp_cleanup */
216
217/*
218 * DASD_3990_ERP_BLOCK_QUEUE
219 *
220 * DESCRIPTION
221 * Block the given device request queue to prevent from further
222 * processing until the started timer has expired or an related
223 * interrupt was received.
224 */
static void
dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
{

	struct dasd_device *device = erp->device;

	DEV_MESSAGE(KERN_INFO, device,
		    "blocking request queue for %is", expires/HZ);

	/* stop the device queue, requeue the erp and arm the device
	 * timer so processing resumes after 'expires' jiffies at the
	 * latest (or earlier on the related interrupt) */
	device->stopped |= DASD_STOPPED_PENDING;
	erp->status = DASD_CQR_QUEUED;

	dasd_set_timer(device, expires);
}
239
240/*
241 * DASD_3990_ERP_INT_REQ
242 *
243 * DESCRIPTION
244 * Handles 'Intervention Required' error.
245 * This means either device offline or not installed.
246 *
247 * PARAMETER
248 * erp current erp
249 * RETURN VALUES
250 * erp modified erp
251 */
252static struct dasd_ccw_req *
253dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
254{
255
256 struct dasd_device *device = erp->device;
257
258 /* first time set initial retry counter and erp_function */
259 /* and retry once without blocking queue */
260 /* (this enables easier enqueing of the cqr) */
261 if (erp->function != dasd_3990_erp_int_req) {
262
263 erp->retries = 256;
264 erp->function = dasd_3990_erp_int_req;
265
266 } else {
267
268 /* issue a message and wait for 'device ready' interrupt */
269 DEV_MESSAGE(KERN_ERR, device, "%s",
270 "is offline or not installed - "
271 "INTERVENTION REQUIRED!!");
272
273 dasd_3990_erp_block_queue(erp, 60*HZ);
274 }
275
276 return erp;
277
278} /* end dasd_3990_erp_int_req */
279
280/*
281 * DASD_3990_ERP_ALTERNATE_PATH
282 *
283 * DESCRIPTION
284 * Repeat the operation on a different channel path.
285 * If all alternate paths have been tried, the request is posted with a
286 * permanent error.
287 *
288 * PARAMETER
289 * erp pointer to the current ERP
290 *
291 * RETURN VALUES
292 * erp modified pointer to the ERP
293 */
294static void
295dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
296{
297 struct dasd_device *device = erp->device;
298 __u8 opm;
299
300 /* try alternate valid path */
301 opm = ccw_device_get_path_mask(device->cdev);
302 //FIXME: start with get_opm ?
303 if (erp->lpm == 0)
304 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
305 else
306 erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
307
308 if ((erp->lpm & opm) != 0x00) {
309
310 DEV_MESSAGE(KERN_DEBUG, device,
311 "try alternate lpm=%x (lpum=%x / opm=%x)",
312 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
313
314 /* reset status to queued to handle the request again... */
315 if (erp->status > DASD_CQR_QUEUED)
316 erp->status = DASD_CQR_QUEUED;
317 erp->retries = 1;
318 } else {
319 DEV_MESSAGE(KERN_ERR, device,
320 "No alternate channel path left (lpum=%x / "
321 "opm=%x) -> permanent error",
322 erp->irb.esw.esw0.sublog.lpum, opm);
323
324 /* post request with permanent error */
325 if (erp->status > DASD_CQR_QUEUED)
326 erp->status = DASD_CQR_FAILED;
327 }
328} /* end dasd_3990_erp_alternate_path */
329
330/*
331 * DASD_3990_ERP_DCTL
332 *
333 * DESCRIPTION
334 * Setup cqr to do the Diagnostic Control (DCTL) command with an
335 * Inhibit Write subcommand (0x20) and the given modifier.
336 *
337 * PARAMETER
338 * erp pointer to the current (failed) ERP
339 * modifier subcommand modifier
340 *
341 * RETURN VALUES
342 * dctl_cqr pointer to NEW dctl_cqr
343 *
344 */
static struct dasd_ccw_req *
dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
{

	struct dasd_device *device = erp->device;
	struct DCTL_data *DCTL_data;
	struct ccw1 *ccw;
	struct dasd_ccw_req *dctl_cqr;

	/* new erp request with room for the DCTL payload */
	dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
					  sizeof (struct DCTL_data),
					  erp->device);
	if (IS_ERR(dctl_cqr)) {
		/* out of memory - fail the original erp instead */
		DEV_MESSAGE(KERN_ERR, device, "%s",
			    "Unable to allocate DCTL-CQR");
		erp->status = DASD_CQR_FAILED;
		return erp;
	}

	DCTL_data = dctl_cqr->data;

	/* NOTE(review): the function header above says subcommand 0x20
	 * but 0x02 is used here - confirm against the 3990 reference. */
	DCTL_data->subcommand = 0x02;	/* Inhibit Write */
	DCTL_data->modifier = modifier;

	/* build the single DCTL ccw */
	ccw = dctl_cqr->cpaddr;
	memset(ccw, 0, sizeof (struct ccw1));
	ccw->cmd_code = CCW_CMD_DCTL;
	ccw->count = 4;
	ccw->cda = (__u32)(addr_t) DCTL_data;
	dctl_cqr->function = dasd_3990_erp_DCTL;
	dctl_cqr->refers = erp;		/* chain to the failed erp */
	dctl_cqr->device = erp->device;
	dctl_cqr->magic = erp->magic;
	dctl_cqr->expires = 5 * 60 * HZ;
	dctl_cqr->retries = 2;

	dctl_cqr->buildclk = get_clock();

	dctl_cqr->status = DASD_CQR_FILLED;

	return dctl_cqr;

} /* end dasd_3990_erp_DCTL */
388
389/*
390 * DASD_3990_ERP_ACTION_1
391 *
392 * DESCRIPTION
393 * Setup ERP to do the ERP action 1 (see Reference manual).
394 * Repeat the operation on a different channel path.
395 * If all alternate paths have been tried, the request is posted with a
396 * permanent error.
397 * Note: duplex handling is not implemented (yet).
398 *
399 * PARAMETER
400 * erp pointer to the current ERP
401 *
402 * RETURN VALUES
403 * erp pointer to the ERP
404 *
405 */
static struct dasd_ccw_req *
dasd_3990_erp_action_1(struct dasd_ccw_req * erp)
{

	erp->function = dasd_3990_erp_action_1;

	/* retry the failed request on an alternate channel path */
	dasd_3990_erp_alternate_path(erp);

	return erp;

} /* end dasd_3990_erp_action_1 */
417
418/*
419 * DASD_3990_ERP_ACTION_4
420 *
421 * DESCRIPTION
422 * Setup ERP to do the ERP action 4 (see Reference manual).
423 * Set the current request to PENDING to block the CQR queue for that device
424 * until the state change interrupt appears.
425 * Use a timer (20 seconds) to retry the cqr if the interrupt is still
426 * missing.
427 *
428 * PARAMETER
429 * sense sense data of the actual error
430 * erp pointer to the current ERP
431 *
432 * RETURN VALUES
433 * erp pointer to the ERP
434 *
435 */
static struct dasd_ccw_req *
dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
{

	struct dasd_device *device = erp->device;

	/* first time set initial retry counter and erp_function */
	/* and retry once without waiting for state change pending */
	/* interrupt (this enables easier enqueing of the cqr) */
	if (erp->function != dasd_3990_erp_action_4) {

		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "dasd_3990_erp_action_4: first time retry");

		erp->retries = 256;
		erp->function = dasd_3990_erp_action_4;

	} else {

		if (sense[25] == 0x1D) {	/* state change pending */

			DEV_MESSAGE(KERN_INFO, device,
				    "waiting for state change pending "
				    "interrupt, %d retries left",
				    erp->retries);

			/* block the queue; the 30s timer is a fallback in
			 * case the state change interrupt never arrives */
			dasd_3990_erp_block_queue(erp, 30*HZ);

		} else if (sense[25] == 0x1E) {	/* busy */
			DEV_MESSAGE(KERN_INFO, device,
				    "busy - redriving request later, "
				    "%d retries left",
				    erp->retries);
			/* device busy - retry after a short (1s) delay */
			dasd_3990_erp_block_queue(erp, HZ);
		} else {

			/* no state change pending - retry */
			DEV_MESSAGE (KERN_INFO, device,
				     "redriving request immediately, "
				     "%d retries left",
				     erp->retries);
			erp->status = DASD_CQR_QUEUED;
		}
	}

	return erp;

} /* end dasd_3990_erp_action_4 */
484
485/*
486 *****************************************************************************
487 * 24 byte sense ERP functions (only)
488 *****************************************************************************
489 */
490
491/*
492 * DASD_3990_ERP_ACTION_5
493 *
494 * DESCRIPTION
495 * Setup ERP to do the ERP action 5 (see Reference manual).
496 * NOTE: Further handling is done in xxx_further_erp after the retries.
497 *
498 * PARAMETER
499 * erp pointer to the current ERP
500 *
501 * RETURN VALUES
502 * erp pointer to the ERP
503 *
504 */
505static struct dasd_ccw_req *
506dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
507{
508
509 /* first of all retry */
510 erp->retries = 10;
511 erp->function = dasd_3990_erp_action_5;
512
513 return erp;
514
515} /* end dasd_3990_erp_action_5 */
516
517/*
518 * DASD_3990_HANDLE_ENV_DATA
519 *
520 * DESCRIPTION
521 * Handles 24 byte 'Environmental data present'.
522 * Does a analysis of the sense data (message Format)
523 * and prints the error messages.
524 *
525 * PARAMETER
526 * sense current sense data
527 *
528 * RETURN VALUES
529 * void
530 */
531static void
532dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
533{
534
535 struct dasd_device *device = erp->device;
536 char msg_format = (sense[7] & 0xF0);
537 char msg_no = (sense[7] & 0x0F);
538
539 switch (msg_format) {
540 case 0x00: /* Format 0 - Program or System Checks */
541
542 if (sense[1] & 0x10) { /* check message to operator bit */
543
544 switch (msg_no) {
545 case 0x00: /* No Message */
546 break;
547 case 0x01:
548 DEV_MESSAGE(KERN_WARNING, device, "%s",
549 "FORMAT 0 - Invalid Command");
550 break;
551 case 0x02:
552 DEV_MESSAGE(KERN_WARNING, device, "%s",
553 "FORMAT 0 - Invalid Command "
554 "Sequence");
555 break;
556 case 0x03:
557 DEV_MESSAGE(KERN_WARNING, device, "%s",
558 "FORMAT 0 - CCW Count less than "
559 "required");
560 break;
561 case 0x04:
562 DEV_MESSAGE(KERN_WARNING, device, "%s",
563 "FORMAT 0 - Invalid Parameter");
564 break;
565 case 0x05:
566 DEV_MESSAGE(KERN_WARNING, device, "%s",
567 "FORMAT 0 - Diagnostic of Sepecial"
568 " Command Violates File Mask");
569 break;
570 case 0x07:
571 DEV_MESSAGE(KERN_WARNING, device, "%s",
572 "FORMAT 0 - Channel Returned with "
573 "Incorrect retry CCW");
574 break;
575 case 0x08:
576 DEV_MESSAGE(KERN_WARNING, device, "%s",
577 "FORMAT 0 - Reset Notification");
578 break;
579 case 0x09:
580 DEV_MESSAGE(KERN_WARNING, device, "%s",
581 "FORMAT 0 - Storage Path Restart");
582 break;
583 case 0x0A:
584 DEV_MESSAGE(KERN_WARNING, device,
585 "FORMAT 0 - Channel requested "
586 "... %02x", sense[8]);
587 break;
588 case 0x0B:
589 DEV_MESSAGE(KERN_WARNING, device, "%s",
590 "FORMAT 0 - Invalid Defective/"
591 "Alternate Track Pointer");
592 break;
593 case 0x0C:
594 DEV_MESSAGE(KERN_WARNING, device, "%s",
595 "FORMAT 0 - DPS Installation "
596 "Check");
597 break;
598 case 0x0E:
599 DEV_MESSAGE(KERN_WARNING, device, "%s",
600 "FORMAT 0 - Command Invalid on "
601 "Secondary Address");
602 break;
603 case 0x0F:
604 DEV_MESSAGE(KERN_WARNING, device,
605 "FORMAT 0 - Status Not As "
606 "Required: reason %02x", sense[8]);
607 break;
608 default:
609 DEV_MESSAGE(KERN_WARNING, device, "%s",
610 "FORMAT 0 - Reseved");
611 }
612 } else {
613 switch (msg_no) {
614 case 0x00: /* No Message */
615 break;
616 case 0x01:
617 DEV_MESSAGE(KERN_WARNING, device, "%s",
618 "FORMAT 0 - Device Error Source");
619 break;
620 case 0x02:
621 DEV_MESSAGE(KERN_WARNING, device, "%s",
622 "FORMAT 0 - Reserved");
623 break;
624 case 0x03:
625 DEV_MESSAGE(KERN_WARNING, device,
626 "FORMAT 0 - Device Fenced - "
627 "device = %02x", sense[4]);
628 break;
629 case 0x04:
630 DEV_MESSAGE(KERN_WARNING, device, "%s",
631 "FORMAT 0 - Data Pinned for "
632 "Device");
633 break;
634 default:
635 DEV_MESSAGE(KERN_WARNING, device, "%s",
636 "FORMAT 0 - Reserved");
637 }
638 }
639 break;
640
641 case 0x10: /* Format 1 - Device Equipment Checks */
642 switch (msg_no) {
643 case 0x00: /* No Message */
644 break;
645 case 0x01:
646 DEV_MESSAGE(KERN_WARNING, device, "%s",
647 "FORMAT 1 - Device Status 1 not as "
648 "expected");
649 break;
650 case 0x03:
651 DEV_MESSAGE(KERN_WARNING, device, "%s",
652 "FORMAT 1 - Index missing");
653 break;
654 case 0x04:
655 DEV_MESSAGE(KERN_WARNING, device, "%s",
656 "FORMAT 1 - Interruption cannot be reset");
657 break;
658 case 0x05:
659 DEV_MESSAGE(KERN_WARNING, device, "%s",
660 "FORMAT 1 - Device did not respond to "
661 "selection");
662 break;
663 case 0x06:
664 DEV_MESSAGE(KERN_WARNING, device, "%s",
665 "FORMAT 1 - Device check-2 error or Set "
666 "Sector is not complete");
667 break;
668 case 0x07:
669 DEV_MESSAGE(KERN_WARNING, device, "%s",
670 "FORMAT 1 - Head address does not "
671 "compare");
672 break;
673 case 0x08:
674 DEV_MESSAGE(KERN_WARNING, device, "%s",
675 "FORMAT 1 - Device status 1 not valid");
676 break;
677 case 0x09:
678 DEV_MESSAGE(KERN_WARNING, device, "%s",
679 "FORMAT 1 - Device not ready");
680 break;
681 case 0x0A:
682 DEV_MESSAGE(KERN_WARNING, device, "%s",
683 "FORMAT 1 - Track physical address did "
684 "not compare");
685 break;
686 case 0x0B:
687 DEV_MESSAGE(KERN_WARNING, device, "%s",
688 "FORMAT 1 - Missing device address bit");
689 break;
690 case 0x0C:
691 DEV_MESSAGE(KERN_WARNING, device, "%s",
692 "FORMAT 1 - Drive motor switch is off");
693 break;
694 case 0x0D:
695 DEV_MESSAGE(KERN_WARNING, device, "%s",
696 "FORMAT 1 - Seek incomplete");
697 break;
698 case 0x0E:
699 DEV_MESSAGE(KERN_WARNING, device, "%s",
700 "FORMAT 1 - Cylinder address did not "
701 "compare");
702 break;
703 case 0x0F:
704 DEV_MESSAGE(KERN_WARNING, device, "%s",
705 "FORMAT 1 - Offset active cannot be "
706 "reset");
707 break;
708 default:
709 DEV_MESSAGE(KERN_WARNING, device, "%s",
710 "FORMAT 1 - Reserved");
711 }
712 break;
713
714 case 0x20: /* Format 2 - 3990 Equipment Checks */
715 switch (msg_no) {
716 case 0x08:
717 DEV_MESSAGE(KERN_WARNING, device, "%s",
718 "FORMAT 2 - 3990 check-2 error");
719 break;
720 case 0x0E:
721 DEV_MESSAGE(KERN_WARNING, device, "%s",
722 "FORMAT 2 - Support facility errors");
723 break;
724 case 0x0F:
725 DEV_MESSAGE(KERN_WARNING, device,
726 "FORMAT 2 - Microcode detected error %02x",
727 sense[8]);
728 break;
729 default:
730 DEV_MESSAGE(KERN_WARNING, device, "%s",
731 "FORMAT 2 - Reserved");
732 }
733 break;
734
735 case 0x30: /* Format 3 - 3990 Control Checks */
736 switch (msg_no) {
737 case 0x0F:
738 DEV_MESSAGE(KERN_WARNING, device, "%s",
739 "FORMAT 3 - Allegiance terminated");
740 break;
741 default:
742 DEV_MESSAGE(KERN_WARNING, device, "%s",
743 "FORMAT 3 - Reserved");
744 }
745 break;
746
747 case 0x40: /* Format 4 - Data Checks */
748 switch (msg_no) {
749 case 0x00:
750 DEV_MESSAGE(KERN_WARNING, device, "%s",
751 "FORMAT 4 - Home address area error");
752 break;
753 case 0x01:
754 DEV_MESSAGE(KERN_WARNING, device, "%s",
755 "FORMAT 4 - Count area error");
756 break;
757 case 0x02:
758 DEV_MESSAGE(KERN_WARNING, device, "%s",
759 "FORMAT 4 - Key area error");
760 break;
761 case 0x03:
762 DEV_MESSAGE(KERN_WARNING, device, "%s",
763 "FORMAT 4 - Data area error");
764 break;
765 case 0x04:
766 DEV_MESSAGE(KERN_WARNING, device, "%s",
767 "FORMAT 4 - No sync byte in home address "
768 "area");
769 break;
770 case 0x05:
771 DEV_MESSAGE(KERN_WARNING, device, "%s",
772 "FORMAT 4 - No sync byte in count address "
773 "area");
774 break;
775 case 0x06:
776 DEV_MESSAGE(KERN_WARNING, device, "%s",
777 "FORMAT 4 - No sync byte in key area");
778 break;
779 case 0x07:
780 DEV_MESSAGE(KERN_WARNING, device, "%s",
781 "FORMAT 4 - No sync byte in data area");
782 break;
783 case 0x08:
784 DEV_MESSAGE(KERN_WARNING, device, "%s",
785 "FORMAT 4 - Home address area error; "
786 "offset active");
787 break;
788 case 0x09:
789 DEV_MESSAGE(KERN_WARNING, device, "%s",
790 "FORMAT 4 - Count area error; offset "
791 "active");
792 break;
793 case 0x0A:
794 DEV_MESSAGE(KERN_WARNING, device, "%s",
795 "FORMAT 4 - Key area error; offset "
796 "active");
797 break;
798 case 0x0B:
799 DEV_MESSAGE(KERN_WARNING, device, "%s",
800 "FORMAT 4 - Data area error; "
801 "offset active");
802 break;
803 case 0x0C:
804 DEV_MESSAGE(KERN_WARNING, device, "%s",
805 "FORMAT 4 - No sync byte in home "
806 "address area; offset active");
807 break;
808 case 0x0D:
809 DEV_MESSAGE(KERN_WARNING, device, "%s",
810 "FORMAT 4 - No syn byte in count "
811 "address area; offset active");
812 break;
813 case 0x0E:
814 DEV_MESSAGE(KERN_WARNING, device, "%s",
815 "FORMAT 4 - No sync byte in key area; "
816 "offset active");
817 break;
818 case 0x0F:
819 DEV_MESSAGE(KERN_WARNING, device, "%s",
820 "FORMAT 4 - No syn byte in data area; "
821 "offset active");
822 break;
823 default:
824 DEV_MESSAGE(KERN_WARNING, device, "%s",
825 "FORMAT 4 - Reserved");
826 }
827 break;
828
829 case 0x50: /* Format 5 - Data Check with displacement information */
830 switch (msg_no) {
831 case 0x00:
832 DEV_MESSAGE(KERN_WARNING, device, "%s",
833 "FORMAT 5 - Data Check in the "
834 "home address area");
835 break;
836 case 0x01:
837 DEV_MESSAGE(KERN_WARNING, device, "%s",
838 "FORMAT 5 - Data Check in the count area");
839 break;
840 case 0x02:
841 DEV_MESSAGE(KERN_WARNING, device, "%s",
842 "FORMAT 5 - Data Check in the key area");
843 break;
844 case 0x03:
845 DEV_MESSAGE(KERN_WARNING, device, "%s",
846 "FORMAT 5 - Data Check in the data area");
847 break;
848 case 0x08:
849 DEV_MESSAGE(KERN_WARNING, device, "%s",
850 "FORMAT 5 - Data Check in the "
851 "home address area; offset active");
852 break;
853 case 0x09:
854 DEV_MESSAGE(KERN_WARNING, device, "%s",
855 "FORMAT 5 - Data Check in the count area; "
856 "offset active");
857 break;
858 case 0x0A:
859 DEV_MESSAGE(KERN_WARNING, device, "%s",
860 "FORMAT 5 - Data Check in the key area; "
861 "offset active");
862 break;
863 case 0x0B:
864 DEV_MESSAGE(KERN_WARNING, device, "%s",
865 "FORMAT 5 - Data Check in the data area; "
866 "offset active");
867 break;
868 default:
869 DEV_MESSAGE(KERN_WARNING, device, "%s",
870 "FORMAT 5 - Reserved");
871 }
872 break;
873
874 case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
875 switch (msg_no) {
876 case 0x00:
877 DEV_MESSAGE(KERN_WARNING, device, "%s",
878 "FORMAT 6 - Overrun on channel A");
879 break;
880 case 0x01:
881 DEV_MESSAGE(KERN_WARNING, device, "%s",
882 "FORMAT 6 - Overrun on channel B");
883 break;
884 case 0x02:
885 DEV_MESSAGE(KERN_WARNING, device, "%s",
886 "FORMAT 6 - Overrun on channel C");
887 break;
888 case 0x03:
889 DEV_MESSAGE(KERN_WARNING, device, "%s",
890 "FORMAT 6 - Overrun on channel D");
891 break;
892 case 0x04:
893 DEV_MESSAGE(KERN_WARNING, device, "%s",
894 "FORMAT 6 - Overrun on channel E");
895 break;
896 case 0x05:
897 DEV_MESSAGE(KERN_WARNING, device, "%s",
898 "FORMAT 6 - Overrun on channel F");
899 break;
900 case 0x06:
901 DEV_MESSAGE(KERN_WARNING, device, "%s",
902 "FORMAT 6 - Overrun on channel G");
903 break;
904 case 0x07:
905 DEV_MESSAGE(KERN_WARNING, device, "%s",
906 "FORMAT 6 - Overrun on channel H");
907 break;
908 default:
909 DEV_MESSAGE(KERN_WARNING, device, "%s",
910 "FORMAT 6 - Reserved");
911 }
912 break;
913
914 case 0x70: /* Format 7 - Device Connection Control Checks */
915 switch (msg_no) {
916 case 0x00:
917 DEV_MESSAGE(KERN_WARNING, device, "%s",
918 "FORMAT 7 - RCC initiated by a connection "
919 "check alert");
920 break;
921 case 0x01:
922 DEV_MESSAGE(KERN_WARNING, device, "%s",
923 "FORMAT 7 - RCC 1 sequence not "
924 "successful");
925 break;
926 case 0x02:
927 DEV_MESSAGE(KERN_WARNING, device, "%s",
928 "FORMAT 7 - RCC 1 and RCC 2 sequences not "
929 "successful");
930 break;
931 case 0x03:
932 DEV_MESSAGE(KERN_WARNING, device, "%s",
933 "FORMAT 7 - Invalid tag-in during "
934 "selection sequence");
935 break;
936 case 0x04:
937 DEV_MESSAGE(KERN_WARNING, device, "%s",
938 "FORMAT 7 - extra RCC required");
939 break;
940 case 0x05:
941 DEV_MESSAGE(KERN_WARNING, device, "%s",
942 "FORMAT 7 - Invalid DCC selection "
943 "response or timeout");
944 break;
945 case 0x06:
946 DEV_MESSAGE(KERN_WARNING, device, "%s",
947 "FORMAT 7 - Missing end operation; device "
948 "transfer complete");
949 break;
950 case 0x07:
951 DEV_MESSAGE(KERN_WARNING, device, "%s",
952 "FORMAT 7 - Missing end operation; device "
953 "transfer incomplete");
954 break;
955 case 0x08:
956 DEV_MESSAGE(KERN_WARNING, device, "%s",
957 "FORMAT 7 - Invalid tag-in for an "
958 "immediate command sequence");
959 break;
960 case 0x09:
961 DEV_MESSAGE(KERN_WARNING, device, "%s",
962 "FORMAT 7 - Invalid tag-in for an "
963 "extended command sequence");
964 break;
965 case 0x0A:
966 DEV_MESSAGE(KERN_WARNING, device, "%s",
967 "FORMAT 7 - 3990 microcode time out when "
968 "stopping selection");
969 break;
970 case 0x0B:
971 DEV_MESSAGE(KERN_WARNING, device, "%s",
972 "FORMAT 7 - No response to selection "
973 "after a poll interruption");
974 break;
975 case 0x0C:
976 DEV_MESSAGE(KERN_WARNING, device, "%s",
977 "FORMAT 7 - Permanent path error (DASD "
978 "controller not available)");
979 break;
980 case 0x0D:
981 DEV_MESSAGE(KERN_WARNING, device, "%s",
982 "FORMAT 7 - DASD controller not available"
983 " on disconnected command chain");
984 break;
985 default:
986 DEV_MESSAGE(KERN_WARNING, device, "%s",
987 "FORMAT 7 - Reserved");
988 }
989 break;
990
991 case 0x80: /* Format 8 - Additional Device Equipment Checks */
992 switch (msg_no) {
993 case 0x00: /* No Message */
994 case 0x01:
995 DEV_MESSAGE(KERN_WARNING, device, "%s",
996 "FORMAT 8 - Error correction code "
997 "hardware fault");
998 break;
999 case 0x03:
1000 DEV_MESSAGE(KERN_WARNING, device, "%s",
1001 "FORMAT 8 - Unexpected end operation "
1002 "response code");
1003 break;
1004 case 0x04:
1005 DEV_MESSAGE(KERN_WARNING, device, "%s",
1006 "FORMAT 8 - End operation with transfer "
1007 "count not zero");
1008 break;
1009 case 0x05:
1010 DEV_MESSAGE(KERN_WARNING, device, "%s",
1011 "FORMAT 8 - End operation with transfer "
1012 "count zero");
1013 break;
1014 case 0x06:
1015 DEV_MESSAGE(KERN_WARNING, device, "%s",
1016 "FORMAT 8 - DPS checks after a system "
1017 "reset or selective reset");
1018 break;
1019 case 0x07:
1020 DEV_MESSAGE(KERN_WARNING, device, "%s",
1021 "FORMAT 8 - DPS cannot be filled");
1022 break;
1023 case 0x08:
1024 DEV_MESSAGE(KERN_WARNING, device, "%s",
1025 "FORMAT 8 - Short busy time-out during "
1026 "device selection");
1027 break;
1028 case 0x09:
1029 DEV_MESSAGE(KERN_WARNING, device, "%s",
1030 "FORMAT 8 - DASD controller failed to "
1031 "set or reset the long busy latch");
1032 break;
1033 case 0x0A:
1034 DEV_MESSAGE(KERN_WARNING, device, "%s",
1035 "FORMAT 8 - No interruption from device "
1036 "during a command chain");
1037 break;
1038 default:
1039 DEV_MESSAGE(KERN_WARNING, device, "%s",
1040 "FORMAT 8 - Reserved");
1041 }
1042 break;
1043
1044 case 0x90: /* Format 9 - Device Read, Write, and Seek Checks */
1045 switch (msg_no) {
1046 case 0x00:
1047 break; /* No Message */
1048 case 0x06:
1049 DEV_MESSAGE(KERN_WARNING, device, "%s",
1050 "FORMAT 9 - Device check-2 error");
1051 break;
1052 case 0x07:
1053 DEV_MESSAGE(KERN_WARNING, device, "%s",
1054 "FORMAT 9 - Head address did not compare");
1055 break;
1056 case 0x0A:
1057 DEV_MESSAGE(KERN_WARNING, device, "%s",
1058 "FORMAT 9 - Track physical address did "
1059 "not compare while oriented");
1060 break;
1061 case 0x0E:
1062 DEV_MESSAGE(KERN_WARNING, device, "%s",
1063 "FORMAT 9 - Cylinder address did not "
1064 "compare");
1065 break;
1066 default:
1067 DEV_MESSAGE(KERN_WARNING, device, "%s",
1068 "FORMAT 9 - Reserved");
1069 }
1070 break;
1071
1072 case 0xF0: /* Format F - Cache Storage Checks */
1073 switch (msg_no) {
1074 case 0x00:
1075 DEV_MESSAGE(KERN_WARNING, device, "%s",
1076 "FORMAT F - Operation Terminated");
1077 break;
1078 case 0x01:
1079 DEV_MESSAGE(KERN_WARNING, device, "%s",
1080 "FORMAT F - Subsystem Processing Error");
1081 break;
1082 case 0x02:
1083 DEV_MESSAGE(KERN_WARNING, device, "%s",
1084 "FORMAT F - Cache or nonvolatile storage "
1085 "equipment failure");
1086 break;
1087 case 0x04:
1088 DEV_MESSAGE(KERN_WARNING, device, "%s",
1089 "FORMAT F - Caching terminated");
1090 break;
1091 case 0x06:
1092 DEV_MESSAGE(KERN_WARNING, device, "%s",
1093 "FORMAT F - Cache fast write access not "
1094 "authorized");
1095 break;
1096 case 0x07:
1097 DEV_MESSAGE(KERN_WARNING, device, "%s",
1098 "FORMAT F - Track format incorrect");
1099 break;
1100 case 0x09:
1101 DEV_MESSAGE(KERN_WARNING, device, "%s",
1102 "FORMAT F - Caching reinitiated");
1103 break;
1104 case 0x0A:
1105 DEV_MESSAGE(KERN_WARNING, device, "%s",
1106 "FORMAT F - Nonvolatile storage "
1107 "terminated");
1108 break;
1109 case 0x0B:
1110 DEV_MESSAGE(KERN_WARNING, device, "%s",
1111 "FORMAT F - Volume is suspended duplex");
1112 break;
1113 case 0x0C:
1114 DEV_MESSAGE(KERN_WARNING, device, "%s",
1115 "FORMAT F - Subsystem status connot be "
1116 "determined");
1117 break;
1118 case 0x0D:
1119 DEV_MESSAGE(KERN_WARNING, device, "%s",
1120 "FORMAT F - Caching status reset to "
1121 "default");
1122 break;
1123 case 0x0E:
1124 DEV_MESSAGE(KERN_WARNING, device, "%s",
1125 "FORMAT F - DASD Fast Write inhibited");
1126 break;
1127 default:
1128 DEV_MESSAGE(KERN_WARNING, device, "%s",
1129 "FORMAT D - Reserved");
1130 }
1131 break;
1132
1133 default: /* unknown message format - should not happen */
1134 DEV_MESSAGE (KERN_WARNING, device,
1135 "unknown message format %02x",
1136 msg_format);
1137 break;
1138 } /* end switch message format */
1139
1140} /* end dasd_3990_handle_env_data */
1141
1142/*
1143 * DASD_3990_ERP_COM_REJ
1144 *
1145 * DESCRIPTION
1146 * Handles 24 byte 'Command Reject' error.
1147 *
1148 * PARAMETER
1149 * erp current erp_head
1150 * sense current sense data
1151 *
1152 * RETURN VALUES
1153 * erp 'new' erp_head - pointer to new ERP
1154 */
1155static struct dasd_ccw_req *
1156dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1157{
1158
1159 struct dasd_device *device = erp->device;
1160
1161 erp->function = dasd_3990_erp_com_rej;
1162
1163 /* env data present (ACTION 10 - retry should work) */
1164 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1165
1166 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1167 "Command Reject - environmental data present");
1168
1169 dasd_3990_handle_env_data(erp, sense);
1170
1171 erp->retries = 5;
1172
1173 } else {
1174 /* fatal error - set status to FAILED */
1175 DEV_MESSAGE(KERN_ERR, device, "%s",
1176 "Command Reject - Fatal error");
1177
1178 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1179 }
1180
1181 return erp;
1182
1183} /* end dasd_3990_erp_com_rej */
1184
1185/*
1186 * DASD_3990_ERP_BUS_OUT
1187 *
1188 * DESCRIPTION
1189 * Handles 24 byte 'Bus Out Parity Check' error.
1190 *
1191 * PARAMETER
1192 * erp current erp_head
1193 * RETURN VALUES
1194 * erp new erp_head - pointer to new ERP
1195 */
1196static struct dasd_ccw_req *
1197dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
1198{
1199
1200 struct dasd_device *device = erp->device;
1201
1202 /* first time set initial retry counter and erp_function */
1203 /* and retry once without blocking queue */
1204 /* (this enables easier enqueing of the cqr) */
1205 if (erp->function != dasd_3990_erp_bus_out) {
1206 erp->retries = 256;
1207 erp->function = dasd_3990_erp_bus_out;
1208
1209 } else {
1210
1211 /* issue a message and wait for 'device ready' interrupt */
1212 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1213 "bus out parity error or BOPC requested by "
1214 "channel");
1215
1216 dasd_3990_erp_block_queue(erp, 60*HZ);
1217
1218 }
1219
1220 return erp;
1221
1222} /* end dasd_3990_erp_bus_out */
1223
1224/*
1225 * DASD_3990_ERP_EQUIP_CHECK
1226 *
1227 * DESCRIPTION
1228 * Handles 24 byte 'Equipment Check' error.
1229 *
1230 * PARAMETER
1231 * erp current erp_head
1232 * RETURN VALUES
1233 * erp new erp_head - pointer to new ERP
1234 */
1235static struct dasd_ccw_req *
1236dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1237{
1238
1239 struct dasd_device *device = erp->device;
1240
1241 erp->function = dasd_3990_erp_equip_check;
1242
1243 if (sense[1] & SNS1_WRITE_INHIBITED) {
1244
1245 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1246 "Write inhibited path encountered");
1247
1248 /* vary path offline */
1249 DEV_MESSAGE(KERN_ERR, device, "%s",
1250 "Path should be varied off-line. "
1251 "This is not implemented yet \n - please report "
1252 "to linux390@de.ibm.com");
1253
1254 erp = dasd_3990_erp_action_1(erp);
1255
1256 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1257
1258 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1259 "Equipment Check - " "environmental data present");
1260
1261 dasd_3990_handle_env_data(erp, sense);
1262
1263 erp = dasd_3990_erp_action_4(erp, sense);
1264
1265 } else if (sense[1] & SNS1_PERM_ERR) {
1266
1267 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1268 "Equipment Check - retry exhausted or "
1269 "undesirable");
1270
1271 erp = dasd_3990_erp_action_1(erp);
1272
1273 } else {
1274 /* all other equipment checks - Action 5 */
1275 /* rest is done when retries == 0 */
1276 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1277 "Equipment check or processing error");
1278
1279 erp = dasd_3990_erp_action_5(erp);
1280 }
1281
1282 return erp;
1283
1284} /* end dasd_3990_erp_equip_check */
1285
1286/*
1287 * DASD_3990_ERP_DATA_CHECK
1288 *
1289 * DESCRIPTION
1290 * Handles 24 byte 'Data Check' error.
1291 *
1292 * PARAMETER
1293 * erp current erp_head
1294 * RETURN VALUES
1295 * erp new erp_head - pointer to new ERP
1296 */
1297static struct dasd_ccw_req *
1298dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1299{
1300
1301 struct dasd_device *device = erp->device;
1302
1303 erp->function = dasd_3990_erp_data_check;
1304
1305 if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
1306
1307 /* issue message that the data has been corrected */
1308 DEV_MESSAGE(KERN_EMERG, device, "%s",
1309 "Data recovered during retry with PCI "
1310 "fetch mode active");
1311
1312 /* not possible to handle this situation in Linux */
1313 panic("No way to inform application about the possibly "
1314 "incorrect data");
1315
1316 } else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1317
1318 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1319 "Uncorrectable data check recovered secondary "
1320 "addr of duplex pair");
1321
1322 erp = dasd_3990_erp_action_4(erp, sense);
1323
1324 } else if (sense[1] & SNS1_PERM_ERR) {
1325
1326 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1327 "Uncorrectable data check with internal "
1328 "retry exhausted");
1329
1330 erp = dasd_3990_erp_action_1(erp);
1331
1332 } else {
1333 /* all other data checks */
1334 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1335 "Uncorrectable data check with retry count "
1336 "exhausted...");
1337
1338 erp = dasd_3990_erp_action_5(erp);
1339 }
1340
1341 return erp;
1342
1343} /* end dasd_3990_erp_data_check */
1344
1345/*
1346 * DASD_3990_ERP_OVERRUN
1347 *
1348 * DESCRIPTION
1349 * Handles 24 byte 'Overrun' error.
1350 *
1351 * PARAMETER
1352 * erp current erp_head
1353 * RETURN VALUES
1354 * erp new erp_head - pointer to new ERP
1355 */
1356static struct dasd_ccw_req *
1357dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
1358{
1359
1360 struct dasd_device *device = erp->device;
1361
1362 erp->function = dasd_3990_erp_overrun;
1363
1364 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1365 "Overrun - service overrun or overrun"
1366 " error requested by channel");
1367
1368 erp = dasd_3990_erp_action_5(erp);
1369
1370 return erp;
1371
1372} /* end dasd_3990_erp_overrun */
1373
1374/*
1375 * DASD_3990_ERP_INV_FORMAT
1376 *
1377 * DESCRIPTION
1378 * Handles 24 byte 'Invalid Track Format' error.
1379 *
1380 * PARAMETER
1381 * erp current erp_head
1382 * RETURN VALUES
1383 * erp new erp_head - pointer to new ERP
1384 */
1385static struct dasd_ccw_req *
1386dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1387{
1388
1389 struct dasd_device *device = erp->device;
1390
1391 erp->function = dasd_3990_erp_inv_format;
1392
1393 if (sense[2] & SNS2_ENV_DATA_PRESENT) {
1394
1395 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1396 "Track format error when destaging or "
1397 "staging data");
1398
1399 dasd_3990_handle_env_data(erp, sense);
1400
1401 erp = dasd_3990_erp_action_4(erp, sense);
1402
1403 } else {
1404 DEV_MESSAGE(KERN_ERR, device, "%s",
1405 "Invalid Track Format - Fatal error should have "
1406 "been handled within the interrupt handler");
1407
1408 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1409 }
1410
1411 return erp;
1412
1413} /* end dasd_3990_erp_inv_format */
1414
1415/*
1416 * DASD_3990_ERP_EOC
1417 *
1418 * DESCRIPTION
1419 * Handles 24 byte 'End-of-Cylinder' error.
1420 *
1421 * PARAMETER
1422 * erp already added default erp
1423 * RETURN VALUES
1424 * erp pointer to original (failed) cqr.
1425 */
1426static struct dasd_ccw_req *
1427dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
1428{
1429
1430 struct dasd_device *device = default_erp->device;
1431
1432 DEV_MESSAGE(KERN_ERR, device, "%s",
1433 "End-of-Cylinder - must never happen");
1434
1435 /* implement action 7 - BUG */
1436 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1437
1438} /* end dasd_3990_erp_EOC */
1439
1440/*
1441 * DASD_3990_ERP_ENV_DATA
1442 *
1443 * DESCRIPTION
1444 * Handles 24 byte 'Environmental-Data Present' error.
1445 *
1446 * PARAMETER
1447 * erp current erp_head
1448 * RETURN VALUES
1449 * erp new erp_head - pointer to new ERP
1450 */
1451static struct dasd_ccw_req *
1452dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1453{
1454
1455 struct dasd_device *device = erp->device;
1456
1457 erp->function = dasd_3990_erp_env_data;
1458
1459 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Environmental data present");
1460
1461 dasd_3990_handle_env_data(erp, sense);
1462
1463 /* don't retry on disabled interface */
1464 if (sense[7] != 0x0F) {
1465
1466 erp = dasd_3990_erp_action_4(erp, sense);
1467 } else {
1468
1469 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
1470 }
1471
1472 return erp;
1473
1474} /* end dasd_3990_erp_env_data */
1475
1476/*
1477 * DASD_3990_ERP_NO_REC
1478 *
1479 * DESCRIPTION
1480 * Handles 24 byte 'No Record Found' error.
1481 *
1482 * PARAMETER
1483 * erp already added default ERP
1484 *
1485 * RETURN VALUES
1486 * erp new erp_head - pointer to new ERP
1487 */
1488static struct dasd_ccw_req *
1489dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
1490{
1491
1492 struct dasd_device *device = default_erp->device;
1493
1494 DEV_MESSAGE(KERN_ERR, device, "%s",
1495 "No Record Found - Fatal error should "
1496 "have been handled within the interrupt handler");
1497
1498 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1499
1500} /* end dasd_3990_erp_no_rec */
1501
1502/*
1503 * DASD_3990_ERP_FILE_PROT
1504 *
1505 * DESCRIPTION
1506 * Handles 24 byte 'File Protected' error.
1507 * Note: Seek related recovery is not implemented because
1508 * wee don't use the seek command yet.
1509 *
1510 * PARAMETER
1511 * erp current erp_head
1512 * RETURN VALUES
1513 * erp new erp_head - pointer to new ERP
1514 */
1515static struct dasd_ccw_req *
1516dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1517{
1518
1519 struct dasd_device *device = erp->device;
1520
1521 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
1522
1523 return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1524
1525} /* end dasd_3990_erp_file_prot */
1526
1527/*
1528 * DASD_3990_ERP_INSPECT_24
1529 *
1530 * DESCRIPTION
1531 * Does a detailed inspection of the 24 byte sense data
1532 * and sets up a related error recovery action.
1533 *
1534 * PARAMETER
1535 * sense sense data of the actual error
1536 * erp pointer to the currently created default ERP
1537 *
1538 * RETURN VALUES
1539 * erp pointer to the (addtitional) ERP
1540 */
1541static struct dasd_ccw_req *
1542dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
1543{
1544
1545 struct dasd_ccw_req *erp_filled = NULL;
1546
1547 /* Check sense for .... */
1548 /* 'Command Reject' */
1549 if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
1550 erp_filled = dasd_3990_erp_com_rej(erp, sense);
1551 }
1552 /* 'Intervention Required' */
1553 if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
1554 erp_filled = dasd_3990_erp_int_req(erp);
1555 }
1556 /* 'Bus Out Parity Check' */
1557 if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
1558 erp_filled = dasd_3990_erp_bus_out(erp);
1559 }
1560 /* 'Equipment Check' */
1561 if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
1562 erp_filled = dasd_3990_erp_equip_check(erp, sense);
1563 }
1564 /* 'Data Check' */
1565 if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
1566 erp_filled = dasd_3990_erp_data_check(erp, sense);
1567 }
1568 /* 'Overrun' */
1569 if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
1570 erp_filled = dasd_3990_erp_overrun(erp, sense);
1571 }
1572 /* 'Invalid Track Format' */
1573 if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
1574 erp_filled = dasd_3990_erp_inv_format(erp, sense);
1575 }
1576 /* 'End-of-Cylinder' */
1577 if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
1578 erp_filled = dasd_3990_erp_EOC(erp, sense);
1579 }
1580 /* 'Environmental Data' */
1581 if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
1582 erp_filled = dasd_3990_erp_env_data(erp, sense);
1583 }
1584 /* 'No Record Found' */
1585 if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
1586 erp_filled = dasd_3990_erp_no_rec(erp, sense);
1587 }
1588 /* 'File Protected' */
1589 if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
1590 erp_filled = dasd_3990_erp_file_prot(erp);
1591 }
1592 /* other (unknown) error - do default ERP */
1593 if (erp_filled == NULL) {
1594
1595 erp_filled = erp;
1596 }
1597
1598 return erp_filled;
1599
1600} /* END dasd_3990_erp_inspect_24 */
1601
1602/*
1603 *****************************************************************************
1604 * 32 byte sense ERP functions (only)
1605 *****************************************************************************
1606 */
1607
1608/*
1609 * DASD_3990_ERPACTION_10_32
1610 *
1611 * DESCRIPTION
1612 * Handles 32 byte 'Action 10' of Single Program Action Codes.
1613 * Just retry and if retry doesn't work, return with error.
1614 *
1615 * PARAMETER
1616 * erp current erp_head
1617 * sense current sense data
1618 * RETURN VALUES
1619 * erp modified erp_head
1620 */
1621static struct dasd_ccw_req *
1622dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
1623{
1624
1625 struct dasd_device *device = erp->device;
1626
1627 erp->retries = 256;
1628 erp->function = dasd_3990_erp_action_10_32;
1629
1630 DEV_MESSAGE(KERN_DEBUG, device, "%s", "Perform logging requested");
1631
1632 return erp;
1633
1634} /* end dasd_3990_erp_action_10_32 */
1635
1636/*
1637 * DASD_3990_ERP_ACTION_1B_32
1638 *
1639 * DESCRIPTION
1640 * Handles 32 byte 'Action 1B' of Single Program Action Codes.
1641 * A write operation could not be finished because of an unexpected
1642 * condition.
1643 * The already created 'default erp' is used to get the link to
1644 * the erp chain, but it can not be used for this recovery
1645 * action because it contains no DE/LO data space.
1646 *
1647 * PARAMETER
1648 * default_erp already added default erp.
1649 * sense current sense data
1650 *
1651 * RETURN VALUES
1652 * erp new erp or
1653 * default_erp in case of imprecise ending or error
1654 */
1655static struct dasd_ccw_req *
1656dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1657{
1658
1659 struct dasd_device *device = default_erp->device;
1660 __u32 cpa = 0;
1661 struct dasd_ccw_req *cqr;
1662 struct dasd_ccw_req *erp;
1663 struct DE_eckd_data *DE_data;
1664 char *LO_data; /* LO_eckd_data_t */
1665 struct ccw1 *ccw;
1666
1667 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1668 "Write not finished because of unexpected condition");
1669
1670 default_erp->function = dasd_3990_erp_action_1B_32;
1671
1672 /* determine the original cqr */
1673 cqr = default_erp;
1674
1675 while (cqr->refers != NULL) {
1676 cqr = cqr->refers;
1677 }
1678
1679 /* for imprecise ending just do default erp */
1680 if (sense[1] & 0x01) {
1681
1682 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1683 "Imprecise ending is set - just retry");
1684
1685 return default_erp;
1686 }
1687
1688 /* determine the address of the CCW to be restarted */
1689 /* Imprecise ending is not set -> addr from IRB-SCSW */
1690 cpa = default_erp->refers->irb.scsw.cpa;
1691
1692 if (cpa == 0) {
1693
1694 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1695 "Unable to determine address of the CCW "
1696 "to be restarted");
1697
1698 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1699 }
1700
1701 /* Build new ERP request including DE/LO */
1702 erp = dasd_alloc_erp_request((char *) &cqr->magic,
1703 2 + 1,/* DE/LO + TIC */
1704 sizeof (struct DE_eckd_data) +
1705 sizeof (struct LO_eckd_data), device);
1706
1707 if (IS_ERR(erp)) {
1708 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
1709 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1710 }
1711
1712 /* use original DE */
1713 DE_data = erp->data;
1714 memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data));
1715
1716 /* create LO */
1717 LO_data = erp->data + sizeof (struct DE_eckd_data);
1718
1719 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1720
1721 DEV_MESSAGE(KERN_ERR, device, "%s",
1722 "BUG - this should not happen");
1723
1724 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1725 }
1726
1727 if ((sense[7] & 0x3F) == 0x01) {
1728 /* operation code is WRITE DATA -> data area orientation */
1729 LO_data[0] = 0x81;
1730
1731 } else if ((sense[7] & 0x3F) == 0x03) {
1732 /* operation code is FORMAT WRITE -> index orientation */
1733 LO_data[0] = 0xC3;
1734
1735 } else {
1736 LO_data[0] = sense[7]; /* operation */
1737 }
1738
1739 LO_data[1] = sense[8]; /* auxiliary */
1740 LO_data[2] = sense[9];
1741 LO_data[3] = sense[3]; /* count */
1742 LO_data[4] = sense[29]; /* seek_addr.cyl */
1743 LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
1744 LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
1745
1746 memcpy(&(LO_data[8]), &(sense[11]), 8);
1747
1748 /* create DE ccw */
1749 ccw = erp->cpaddr;
1750 memset(ccw, 0, sizeof (struct ccw1));
1751 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
1752 ccw->flags = CCW_FLAG_CC;
1753 ccw->count = 16;
1754 ccw->cda = (__u32)(addr_t) DE_data;
1755
1756 /* create LO ccw */
1757 ccw++;
1758 memset(ccw, 0, sizeof (struct ccw1));
1759 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
1760 ccw->flags = CCW_FLAG_CC;
1761 ccw->count = 16;
1762 ccw->cda = (__u32)(addr_t) LO_data;
1763
1764 /* TIC to the failed ccw */
1765 ccw++;
1766 ccw->cmd_code = CCW_CMD_TIC;
1767 ccw->cda = cpa;
1768
1769 /* fill erp related fields */
1770 erp->function = dasd_3990_erp_action_1B_32;
1771 erp->refers = default_erp->refers;
1772 erp->device = device;
1773 erp->magic = default_erp->magic;
1774 erp->expires = 0;
1775 erp->retries = 256;
1776 erp->buildclk = get_clock();
1777 erp->status = DASD_CQR_FILLED;
1778
1779 /* remove the default erp */
1780 dasd_free_erp_request(default_erp, device);
1781
1782 return erp;
1783
1784} /* end dasd_3990_erp_action_1B_32 */
1785
1786/*
1787 * DASD_3990_UPDATE_1B
1788 *
1789 * DESCRIPTION
1790 * Handles the update to the 32 byte 'Action 1B' of Single Program
1791 * Action Codes in case the first action was not successful.
1792 * The already created 'previous_erp' is the currently not successful
1793 * ERP.
1794 *
1795 * PARAMETER
1796 * previous_erp already created previous erp.
1797 * sense current sense data
1798 * RETURN VALUES
1799 * erp modified erp
1800 */
1801static struct dasd_ccw_req *
1802dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1803{
1804
1805 struct dasd_device *device = previous_erp->device;
1806 __u32 cpa = 0;
1807 struct dasd_ccw_req *cqr;
1808 struct dasd_ccw_req *erp;
1809 char *LO_data; /* struct LO_eckd_data */
1810 struct ccw1 *ccw;
1811
1812 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1813 "Write not finished because of unexpected condition"
1814 " - follow on");
1815
1816 /* determine the original cqr */
1817 cqr = previous_erp;
1818
1819 while (cqr->refers != NULL) {
1820 cqr = cqr->refers;
1821 }
1822
1823 /* for imprecise ending just do default erp */
1824 if (sense[1] & 0x01) {
1825
1826 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1827 "Imprecise ending is set - just retry");
1828
1829 previous_erp->status = DASD_CQR_QUEUED;
1830
1831 return previous_erp;
1832 }
1833
1834 /* determine the address of the CCW to be restarted */
1835 /* Imprecise ending is not set -> addr from IRB-SCSW */
1836 cpa = previous_erp->irb.scsw.cpa;
1837
1838 if (cpa == 0) {
1839
1840 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1841 "Unable to determine address of the CCW "
1842 "to be restarted");
1843
1844 previous_erp->status = DASD_CQR_FAILED;
1845
1846 return previous_erp;
1847 }
1848
1849 erp = previous_erp;
1850
1851 /* update the LO with the new returned sense data */
1852 LO_data = erp->data + sizeof (struct DE_eckd_data);
1853
1854 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1855
1856 DEV_MESSAGE(KERN_ERR, device, "%s",
1857 "BUG - this should not happen");
1858
1859 previous_erp->status = DASD_CQR_FAILED;
1860
1861 return previous_erp;
1862 }
1863
1864 if ((sense[7] & 0x3F) == 0x01) {
1865 /* operation code is WRITE DATA -> data area orientation */
1866 LO_data[0] = 0x81;
1867
1868 } else if ((sense[7] & 0x3F) == 0x03) {
1869 /* operation code is FORMAT WRITE -> index orientation */
1870 LO_data[0] = 0xC3;
1871
1872 } else {
1873 LO_data[0] = sense[7]; /* operation */
1874 }
1875
1876 LO_data[1] = sense[8]; /* auxiliary */
1877 LO_data[2] = sense[9];
1878 LO_data[3] = sense[3]; /* count */
1879 LO_data[4] = sense[29]; /* seek_addr.cyl */
1880 LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
1881 LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
1882
1883 memcpy(&(LO_data[8]), &(sense[11]), 8);
1884
1885 /* TIC to the failed ccw */
1886 ccw = erp->cpaddr; /* addr of DE ccw */
1887 ccw++; /* addr of LE ccw */
1888 ccw++; /* addr of TIC ccw */
1889 ccw->cda = cpa;
1890
1891 erp->status = DASD_CQR_QUEUED;
1892
1893 return erp;
1894
1895} /* end dasd_3990_update_1B */
1896
1897/*
1898 * DASD_3990_ERP_COMPOUND_RETRY
1899 *
1900 * DESCRIPTION
1901 * Handles the compound ERP action retry code.
1902 * NOTE: At least one retry is done even if zero is specified
1903 * by the sense data. This makes enqueueing of the request
1904 * easier.
1905 *
1906 * PARAMETER
1907 * sense sense data of the actual error
1908 * erp pointer to the currently created ERP
1909 *
1910 * RETURN VALUES
1911 * erp modified ERP pointer
1912 *
1913 */
1914static void
1915dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
1916{
1917
1918 switch (sense[25] & 0x03) {
1919 case 0x00: /* no not retry */
1920 erp->retries = 1;
1921 break;
1922
1923 case 0x01: /* retry 2 times */
1924 erp->retries = 2;
1925 break;
1926
1927 case 0x02: /* retry 10 times */
1928 erp->retries = 10;
1929 break;
1930
1931 case 0x03: /* retry 256 times */
1932 erp->retries = 256;
1933 break;
1934
1935 default:
1936 BUG();
1937 }
1938
1939 erp->function = dasd_3990_erp_compound_retry;
1940
1941} /* end dasd_3990_erp_compound_retry */
1942
1943/*
1944 * DASD_3990_ERP_COMPOUND_PATH
1945 *
1946 * DESCRIPTION
1947 * Handles the compound ERP action for retry on alternate
1948 * channel path.
1949 *
1950 * PARAMETER
1951 * sense sense data of the actual error
1952 * erp pointer to the currently created ERP
1953 *
1954 * RETURN VALUES
1955 * erp modified ERP pointer
1956 *
1957 */
1958static void
1959dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
1960{
1961
1962 if (sense[25] & DASD_SENSE_BIT_3) {
1963 dasd_3990_erp_alternate_path(erp);
1964
1965 if (erp->status == DASD_CQR_FAILED) {
1966 /* reset the lpm and the status to be able to
1967 * try further actions. */
1968
1969 erp->lpm = 0;
1970
1971 erp->status = DASD_CQR_ERROR;
1972
1973 }
1974 }
1975
1976 erp->function = dasd_3990_erp_compound_path;
1977
1978} /* end dasd_3990_erp_compound_path */
1979
1980/*
1981 * DASD_3990_ERP_COMPOUND_CODE
1982 *
1983 * DESCRIPTION
1984 * Handles the compound ERP action for retry code.
1985 *
1986 * PARAMETER
1987 * sense sense data of the actual error
1988 * erp pointer to the currently created ERP
1989 *
1990 * RETURN VALUES
1991 * erp NEW ERP pointer
1992 *
1993 */
1994static struct dasd_ccw_req *
1995dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
1996{
1997
1998 if (sense[25] & DASD_SENSE_BIT_2) {
1999
2000 switch (sense[28]) {
2001 case 0x17:
2002 /* issue a Diagnostic Control command with an
2003 * Inhibit Write subcommand and controler modifier */
2004 erp = dasd_3990_erp_DCTL(erp, 0x20);
2005 break;
2006
2007 case 0x25:
2008 /* wait for 5 seconds and retry again */
2009 erp->retries = 1;
2010
2011 dasd_3990_erp_block_queue (erp, 5*HZ);
2012 break;
2013
2014 default:
2015 /* should not happen - continue */
2016 break;
2017 }
2018 }
2019
2020 erp->function = dasd_3990_erp_compound_code;
2021
2022 return erp;
2023
2024} /* end dasd_3990_erp_compound_code */
2025
2026/*
2027 * DASD_3990_ERP_COMPOUND_CONFIG
2028 *
2029 * DESCRIPTION
2030 * Handles the compound ERP action for configruation
2031 * dependent error.
2032 * Note: duplex handling is not implemented (yet).
2033 *
2034 * PARAMETER
2035 * sense sense data of the actual error
2036 * erp pointer to the currently created ERP
2037 *
2038 * RETURN VALUES
2039 * erp modified ERP pointer
2040 *
2041 */
2042static void
2043dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
2044{
2045
2046 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
2047
2048 /* set to suspended duplex state then restart */
2049 struct dasd_device *device = erp->device;
2050
2051 DEV_MESSAGE(KERN_ERR, device, "%s",
2052 "Set device to suspended duplex state should be "
2053 "done!\n"
2054 "This is not implemented yet (for compound ERP)"
2055 " - please report to linux390@de.ibm.com");
2056
2057 }
2058
2059 erp->function = dasd_3990_erp_compound_config;
2060
2061} /* end dasd_3990_erp_compound_config */
2062
2063/*
2064 * DASD_3990_ERP_COMPOUND
2065 *
2066 * DESCRIPTION
2067 * Does the further compound program action if
2068 * compound retry was not successful.
2069 *
2070 * PARAMETER
2071 * sense sense data of the actual error
2072 * erp pointer to the current (failed) ERP
2073 *
2074 * RETURN VALUES
2075 * erp (additional) ERP pointer
2076 *
2077 */
2078static struct dasd_ccw_req *
2079dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
2080{
2081
2082 if ((erp->function == dasd_3990_erp_compound_retry) &&
2083 (erp->status == DASD_CQR_ERROR)) {
2084
2085 dasd_3990_erp_compound_path(erp, sense);
2086 }
2087
2088 if ((erp->function == dasd_3990_erp_compound_path) &&
2089 (erp->status == DASD_CQR_ERROR)) {
2090
2091 erp = dasd_3990_erp_compound_code(erp, sense);
2092 }
2093
2094 if ((erp->function == dasd_3990_erp_compound_code) &&
2095 (erp->status == DASD_CQR_ERROR)) {
2096
2097 dasd_3990_erp_compound_config(erp, sense);
2098 }
2099
2100 /* if no compound action ERP specified, the request failed */
2101 if (erp->status == DASD_CQR_ERROR) {
2102
2103 erp->status = DASD_CQR_FAILED;
2104 }
2105
2106 return erp;
2107
2108} /* end dasd_3990_erp_compound */
2109
2110/*
2111 * DASD_3990_ERP_INSPECT_32
2112 *
2113 * DESCRIPTION
2114 * Does a detailed inspection of the 32 byte sense data
2115 * and sets up a related error recovery action.
2116 *
2117 * PARAMETER
2118 * sense sense data of the actual error
2119 * erp pointer to the currently created default ERP
2120 *
2121 * RETURN VALUES
2122 * erp_filled pointer to the ERP
2123 *
2124 */
2125static struct dasd_ccw_req *
2126dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2127{
2128
2129 struct dasd_device *device = erp->device;
2130
2131 erp->function = dasd_3990_erp_inspect_32;
2132
2133 if (sense[25] & DASD_SENSE_BIT_0) {
2134
2135 /* compound program action codes (byte25 bit 0 == '1') */
2136 dasd_3990_erp_compound_retry(erp, sense);
2137
2138 } else {
2139
2140 /* single program action codes (byte25 bit 0 == '0') */
2141 switch (sense[25]) {
2142
2143 case 0x00: /* success - use default ERP for retries */
2144 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2145 "ERP called for successful request"
2146 " - just retry");
2147 break;
2148
2149 case 0x01: /* fatal error */
2150 DEV_MESSAGE(KERN_ERR, device, "%s",
2151 "Fatal error should have been "
2152 "handled within the interrupt handler");
2153
2154 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2155 break;
2156
2157 case 0x02: /* intervention required */
2158 case 0x03: /* intervention required during dual copy */
2159 erp = dasd_3990_erp_int_req(erp);
2160 break;
2161
2162 case 0x0F: /* length mismatch during update write command */
2163 DEV_MESSAGE(KERN_ERR, device, "%s",
2164 "update write command error - should not "
2165 "happen;\n"
2166 "Please send this message together with "
2167 "the above sense data to linux390@de."
2168 "ibm.com");
2169
2170 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2171 break;
2172
2173 case 0x10: /* logging required for other channel program */
2174 erp = dasd_3990_erp_action_10_32(erp, sense);
2175 break;
2176
2177 case 0x15: /* next track outside defined extend */
2178 DEV_MESSAGE(KERN_ERR, device, "%s",
2179 "next track outside defined extend - "
2180 "should not happen;\n"
2181 "Please send this message together with "
2182 "the above sense data to linux390@de."
2183 "ibm.com");
2184
2185 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2186 break;
2187
2188 case 0x1B: /* unexpected condition during write */
2189
2190 erp = dasd_3990_erp_action_1B_32(erp, sense);
2191 break;
2192
2193 case 0x1C: /* invalid data */
2194 DEV_MESSAGE(KERN_EMERG, device, "%s",
2195 "Data recovered during retry with PCI "
2196 "fetch mode active");
2197
2198 /* not possible to handle this situation in Linux */
2199 panic
2200 ("Invalid data - No way to inform application "
2201 "about the possibly incorrect data");
2202 break;
2203
2204 case 0x1D: /* state-change pending */
2205 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2206 "A State change pending condition exists "
2207 "for the subsystem or device");
2208
2209 erp = dasd_3990_erp_action_4(erp, sense);
2210 break;
2211
2212 case 0x1E: /* busy */
2213 DEV_MESSAGE(KERN_DEBUG, device, "%s",
2214 "Busy condition exists "
2215 "for the subsystem or device");
2216 erp = dasd_3990_erp_action_4(erp, sense);
2217 break;
2218
2219 default: /* all others errors - default erp */
2220 break;
2221 }
2222 }
2223
2224 return erp;
2225
2226} /* end dasd_3990_erp_inspect_32 */
2227
2228/*
2229 *****************************************************************************
2230 * main ERP control fuctions (24 and 32 byte sense)
2231 *****************************************************************************
2232 */
2233
2234/*
2235 * DASD_3990_ERP_INSPECT
2236 *
2237 * DESCRIPTION
2238 * Does a detailed inspection for sense data by calling either
2239 * the 24-byte or the 32-byte inspection routine.
2240 *
2241 * PARAMETER
2242 * erp pointer to the currently created default ERP
2243 * RETURN VALUES
2244 * erp_new contens was possibly modified
2245 */
2246static struct dasd_ccw_req *
2247dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2248{
2249
2250 struct dasd_ccw_req *erp_new = NULL;
2251 /* sense data are located in the refers record of the */
2252 /* already set up new ERP ! */
2253 char *sense = erp->refers->irb.ecw;
2254
2255 /* distinguish between 24 and 32 byte sense data */
2256 if (sense[27] & DASD_SENSE_BIT_0) {
2257
2258 /* inspect the 24 byte sense data */
2259 erp_new = dasd_3990_erp_inspect_24(erp, sense);
2260
2261 } else {
2262
2263 /* inspect the 32 byte sense data */
2264 erp_new = dasd_3990_erp_inspect_32(erp, sense);
2265
2266 } /* end distinguish between 24 and 32 byte sense data */
2267
2268 return erp_new;
2269}
2270
2271/*
2272 * DASD_3990_ERP_ADD_ERP
2273 *
2274 * DESCRIPTION
2275 * This funtion adds an additional request block (ERP) to the head of
2276 * the given cqr (or erp).
2277 * This erp is initialized as an default erp (retry TIC)
2278 *
2279 * PARAMETER
2280 * cqr head of the current ERP-chain (or single cqr if
2281 * first error)
2282 * RETURN VALUES
2283 * erp pointer to new ERP-chain head
2284 */
2285static struct dasd_ccw_req *
2286dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2287{
2288
2289 struct dasd_device *device = cqr->device;
2290 struct ccw1 *ccw;
2291
2292 /* allocate additional request block */
2293 struct dasd_ccw_req *erp;
2294
2295 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device);
2296 if (IS_ERR(erp)) {
2297 if (cqr->retries <= 0) {
2298 DEV_MESSAGE(KERN_ERR, device, "%s",
2299 "Unable to allocate ERP request");
2300 cqr->status = DASD_CQR_FAILED;
2301 cqr->stopclk = get_clock ();
2302 } else {
2303 DEV_MESSAGE (KERN_ERR, device,
2304 "Unable to allocate ERP request "
2305 "(%i retries left)",
2306 cqr->retries);
2307 dasd_set_timer(device, (HZ << 3));
2308 }
2309 return cqr;
2310 }
2311
2312 /* initialize request with default TIC to current ERP/CQR */
2313 ccw = erp->cpaddr;
2314 ccw->cmd_code = CCW_CMD_NOOP;
2315 ccw->flags = CCW_FLAG_CC;
2316 ccw++;
2317 ccw->cmd_code = CCW_CMD_TIC;
2318 ccw->cda = (long)(cqr->cpaddr);
2319 erp->function = dasd_3990_erp_add_erp;
2320 erp->refers = cqr;
2321 erp->device = cqr->device;
2322 erp->magic = cqr->magic;
2323 erp->expires = 0;
2324 erp->retries = 256;
2325 erp->buildclk = get_clock();
2326
2327 erp->status = DASD_CQR_FILLED;
2328
2329 return erp;
2330}
2331
2332/*
2333 * DASD_3990_ERP_ADDITIONAL_ERP
2334 *
2335 * DESCRIPTION
2336 * An additional ERP is needed to handle the current error.
2337 * Add ERP to the head of the ERP-chain containing the ERP processing
2338 * determined based on the sense data.
2339 *
2340 * PARAMETER
2341 * cqr head of the current ERP-chain (or single cqr if
2342 * first error)
2343 *
2344 * RETURN VALUES
2345 * erp pointer to new ERP-chain head
2346 */
static struct dasd_ccw_req *
dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
{
	struct dasd_ccw_req *erp;

	/* put a default (retry TIC) erp at the head of the chain */
	erp = dasd_3990_erp_add_erp(cqr);

	/* dasd_3990_erp_add_erp hands back the original cqr when the
	 * allocation failed; only then skip the sense inspection */
	if (erp == cqr)
		return erp;

	/* inspect sense, determine specific ERP if possible */
	return dasd_3990_erp_inspect(erp);

}				/* end dasd_3990_erp_additional_erp */
2365
2366/*
2367 * DASD_3990_ERP_ERROR_MATCH
2368 *
2369 * DESCRIPTION
2370 * Check if the device status of the given cqr is the same.
2371 * This means that the failed CCW and the relevant sense data
2372 * must match.
 * I don't distinguish between 24 and 32 byte sense because in case of
 * 24 byte sense, bytes 25 and 27 are set as well.
2375 *
2376 * PARAMETER
2377 * cqr1 first cqr, which will be compared with the
2378 * cqr2 second cqr.
2379 *
2380 * RETURN VALUES
2381 * match 'boolean' for match found
2382 * returns 1 if match found, otherwise 0.
2383 */
static int
dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
{

	/* check failed CCW */
	/* NOTE(review): the early return below is commented out, so a
	 * mismatch of the failed CCW addresses does NOT prevent a match
	 * and only the sense comparison decides.  Presumably deliberate
	 * (a retried program may fail at a different CCW address) -
	 * confirm before re-enabling. */
	if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) {
		//	return 0;	/* CCW doesn't match */
	}

	/* check sense data; byte 0-2,25,27 must all be equal for the
	 * two requests to count as the same error */
	if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
	      (cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
	      (cqr1->irb.ecw[25] == cqr2->irb.ecw[25]))) {

		return 0;	/* sense doesn't match */
	}

	return 1;		/* match */

}				/* end dasd_3990_erp_error_match */
2404
2405/*
2406 * DASD_3990_ERP_IN_ERP
2407 *
2408 * DESCRIPTION
2409 * check if the current error already happened before.
2410 * quick exit if current cqr is not an ERP (cqr->refers=NULL)
2411 *
2412 * PARAMETER
2413 * cqr failed cqr (either original cqr or already an erp)
2414 *
2415 * RETURN VALUES
2416 * erp erp-pointer to the already defined error
2417 * recovery procedure OR
2418 * NULL if a 'new' error occurred.
2419 */
2420static struct dasd_ccw_req *
2421dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
2422{
2423
2424 struct dasd_ccw_req *erp_head = cqr, /* save erp chain head */
2425 *erp_match = NULL; /* save erp chain head */
2426 int match = 0; /* 'boolean' for matching error found */
2427
2428 if (cqr->refers == NULL) { /* return if not in erp */
2429 return NULL;
2430 }
2431
2432 /* check the erp/cqr chain for current error */
2433 do {
2434 match = dasd_3990_erp_error_match(erp_head, cqr->refers);
2435 erp_match = cqr; /* save possible matching erp */
2436 cqr = cqr->refers; /* check next erp/cqr in queue */
2437
2438 } while ((cqr->refers != NULL) && (!match));
2439
2440 if (!match) {
2441 return NULL; /* no match was found */
2442 }
2443
2444 return erp_match; /* return address of matching erp */
2445
2446} /* END dasd_3990_erp_in_erp */
2447
2448/*
2449 * DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
2450 *
2451 * DESCRIPTION
2452 * No retry is left for the current ERP. Check what has to be done
2453 * with the ERP.
2454 * - do further defined ERP action or
2455 * - wait for interrupt or
2456 * - exit with permanent error
2457 *
2458 * PARAMETER
2459 * erp ERP which is in progress with no retry left
2460 *
2461 * RETURN VALUES
2462 * erp modified/additional ERP
2463 */
static struct dasd_ccw_req *
dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
{

	struct dasd_device *device = erp->device;
	char *sense = erp->irb.ecw;

	/* check for 24 byte sense ERP */
	if ((erp->function == dasd_3990_erp_bus_out) ||
	    (erp->function == dasd_3990_erp_action_1) ||
	    (erp->function == dasd_3990_erp_action_4)) {

		/* retry on an alternate channel path */
		erp = dasd_3990_erp_action_1(erp);

	} else if (erp->function == dasd_3990_erp_action_5) {

		/* retries have not been successful */
		/* prepare erp for retry on different channel path */
		erp = dasd_3990_erp_action_1(erp);

		/* NOTE(review): DCTL is only issued when sense byte 2
		 * bit 0 is clear - confirm the exact meaning of this
		 * bit against the 3990 sense byte definitions */
		if (!(sense[2] & DASD_SENSE_BIT_0)) {

			/* issue a Diagnostic Control command with an
			 * Inhibit Write subcommand */

			/* sense byte 25 selects the DCTL modifier */
			switch (sense[25]) {
			case 0x17:
			case 0x57:{	/* controller */
					erp = dasd_3990_erp_DCTL(erp, 0x20);
					break;
				}
			case 0x18:
			case 0x58:{	/* channel path */
					erp = dasd_3990_erp_DCTL(erp, 0x40);
					break;
				}
			case 0x19:
			case 0x59:{	/* storage director */
					erp = dasd_3990_erp_DCTL(erp, 0x80);
					break;
				}
			default:
				DEV_MESSAGE(KERN_DEBUG, device,
					    "invalid subcommand modifier 0x%x "
					    "for Diagnostic Control Command",
					    sense[25]);
			}
		}

		/* check for 32 byte sense ERP */
	} else if ((erp->function == dasd_3990_erp_compound_retry) ||
		   (erp->function == dasd_3990_erp_compound_path) ||
		   (erp->function == dasd_3990_erp_compound_code) ||
		   (erp->function == dasd_3990_erp_compound_config)) {

		/* continue with the next step of compound recovery */
		erp = dasd_3990_erp_compound(erp, sense);

	} else {
		/* No retry left and no additional special handling */
		/*necessary */
		DEV_MESSAGE(KERN_ERR, device,
			    "no retries left for erp %p - "
			    "set status to FAILED", erp);

		erp->status = DASD_CQR_FAILED;
	}

	return erp;

}				/* end dasd_3990_erp_further_erp */
2534
2535/*
2536 * DASD_3990_ERP_HANDLE_MATCH_ERP
2537 *
2538 * DESCRIPTION
2539 * An error occurred again and an ERP has been detected which is already
2540 * used to handle this error (e.g. retries).
 * All prior ERP's are assumed to be successful and therefore removed
2542 * from queue.
2543 * If retry counter of matching erp is already 0, it is checked if further
2544 * action is needed (besides retry) or if the ERP has failed.
2545 *
2546 * PARAMETER
2547 * erp_head first ERP in ERP-chain
2548 * erp ERP that handles the actual error.
2549 * (matching erp)
2550 *
2551 * RETURN VALUES
2552 * erp modified/additional ERP
2553 */
static struct dasd_ccw_req *
dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
			       struct dasd_ccw_req *erp)
{

	struct dasd_device *device = erp_head->device;
	struct dasd_ccw_req *erp_done = erp_head;	/* finished req */
	struct dasd_ccw_req *erp_free = NULL;	/* req to be freed */

	/* loop over successful ERPs and remove them from chanq;
	 * everything in front of the matching erp is considered done */
	while (erp_done != erp) {

		/* the matching erp must be reachable via refers, or the
		 * original request has been lost from the chain */
		if (erp_done == NULL)	/* end of chain reached */
			panic(PRINTK_HEADER "Programming error in ERP! The "
			      "original request was lost\n");

		/* remove the request from the device queue */
		list_del(&erp_done->list);

		erp_free = erp_done;
		erp_done = erp_done->refers;

		/* free the finished erp request */
		dasd_free_erp_request(erp_free, erp_free->device);

	}			/* end while */

	if (erp->retries > 0) {

		/* sense of the original failure, stored in the refers
		 * record of the matching erp */
		char *sense = erp->refers->irb.ecw;

		/* check for special retries */
		if (erp->function == dasd_3990_erp_action_4) {

			erp = dasd_3990_erp_action_4(erp, sense);

		} else if (erp->function == dasd_3990_erp_action_1B_32) {

			erp = dasd_3990_update_1B(erp, sense);

		} else if (erp->function == dasd_3990_erp_int_req) {

			erp = dasd_3990_erp_int_req(erp);

		} else {
			/* simple retry	  */
			DEV_MESSAGE(KERN_DEBUG, device,
				    "%i retries left for erp %p",
				    erp->retries, erp);

			/* handle the request again... */
			erp->status = DASD_CQR_QUEUED;
		}

	} else {
		/* no retry left - check for further necessary action	 */
		/* if no further actions, handle rest as permanent error */
		erp = dasd_3990_erp_further_erp(erp);
	}

	return erp;

}				/* end dasd_3990_erp_handle_match_erp */
2617
2618/*
2619 * DASD_3990_ERP_ACTION
2620 *
2621 * DESCRIPTION
 *	control routine for 3990 erp actions.
2623 * Has to be called with the queue lock (namely the s390_irq_lock) acquired.
2624 *
2625 * PARAMETER
2626 * cqr failed cqr (either original cqr or already an erp)
2627 *
2628 * RETURN VALUES
2629 * erp erp-pointer to the head of the ERP action chain.
2630 * This means:
2631 * - either a ptr to an additional ERP cqr or
2632 * - the original given cqr (which's status might
2633 * be modified)
2634 */
struct dasd_ccw_req *
dasd_3990_erp_action(struct dasd_ccw_req * cqr)
{

	struct dasd_ccw_req *erp = NULL;
	struct dasd_device *device = cqr->device;
	/* remember the failed CCW address for logging before the chain
	 * is possibly modified below */
	__u32 cpa = cqr->irb.scsw.cpa;

#ifdef ERP_DEBUG
	/* print current erp_chain */
	DEV_MESSAGE(KERN_ERR, device, "%s",
		    "ERP chain at BEGINNING of ERP-ACTION");
	{
		struct dasd_ccw_req *temp_erp = NULL;

		for (temp_erp = cqr;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {

			DEV_MESSAGE(KERN_ERR, device,
				    " erp %p (%02x) refers to %p",
				    temp_erp, temp_erp->status,
				    temp_erp->refers);
		}
	}
#endif				/* ERP_DEBUG */

	/* double-check if current erp/cqr was successfull */
	if ((cqr->irb.scsw.cstat == 0x00) &&
	    (cqr->irb.scsw.dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {

		DEV_MESSAGE(KERN_DEBUG, device,
			    "ERP called for successful request %p"
			    " - NO ERP necessary", cqr);

		cqr->status = DASD_CQR_DONE;

		return cqr;
	}
	/* check if sense data are available */
	/* NOTE(review): if irb.ecw is an embedded array (rather than a
	 * pointer) its address is never NULL and this branch is dead -
	 * verify against the declaration of struct irb */
	if (!cqr->irb.ecw) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "ERP called witout sense data avail ..."
			    "request %p - NO ERP possible", cqr);

		cqr->status = DASD_CQR_FAILED;

		return cqr;

	}

	/* check if error happened before */
	erp = dasd_3990_erp_in_erp(cqr);

	if (erp == NULL) {
		/* no matching erp found - set up erp */
		erp = dasd_3990_erp_additional_erp(cqr);
	} else {
		/* matching erp found - set all leading erp's to DONE */
		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
	}

#ifdef ERP_DEBUG
	/* print current erp_chain */
	DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
	{
		struct dasd_ccw_req *temp_erp = NULL;
		for (temp_erp = erp;
		     temp_erp != NULL; temp_erp = temp_erp->refers) {

			DEV_MESSAGE(KERN_ERR, device,
				    " erp %p (%02x) refers to %p",
				    temp_erp, temp_erp->status,
				    temp_erp->refers);
		}
	}
#endif				/* ERP_DEBUG */

	/* log the failed request before it is handed back */
	if (erp->status == DASD_CQR_FAILED)
		dasd_log_ccw(erp, 1, cpa);

	/* enqueue added ERP request */
	if (erp->status == DASD_CQR_FILLED) {
		erp->status = DASD_CQR_QUEUED;
		list_add(&erp->list, &device->ccw_queue);
	}

	return erp;

}				/* end dasd_3990_erp_action */
2724
2725/*
2726 * Overrides for Emacs so that we follow Linus's tabbing style.
2727 * Emacs will notice this stuff at the end of the file and automatically
2728 * adjust the settings for this buffer only. This must remain at the end
2729 * of the file.
2730 * ---------------------------------------------------------------------------
2731 * Local variables:
2732 * c-indent-level: 4
2733 * c-brace-imaginary-offset: 0
2734 * c-brace-offset: -4
2735 * c-argdecl-indent: 4
2736 * c-label-offset: -4
2737 * c-continued-statement-offset: 4
2738 * c-continued-brace-offset: 0
2739 * indent-tabs-mode: 1
2740 * tab-width: 8
2741 * End:
2742 */
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
new file mode 100644
index 000000000000..01e87170a3a2
--- /dev/null
+++ b/drivers/s390/block/dasd_9336_erp.c
@@ -0,0 +1,61 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9336_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.8 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(9336)"
11
12#include "dasd_int.h"
13
14
15/*
16 * DASD_9336_ERP_EXAMINE
17 *
18 * DESCRIPTION
19 * Checks only for fatal/no/recover error.
20 * A detailed examination of the sense data is done later outside
21 * the interrupt handler.
22 *
23 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
24 * 'Chapter 7. 9336 Sense Data'.
25 *
26 * RETURN VALUES
27 * dasd_era_none no error
28 * dasd_era_fatal for all fatal (unrecoverable errors)
29 * dasd_era_recover for all others.
30 */
31dasd_era_t
32dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
33{
34 /* check for successful execution first */
35 if (irb->scsw.cstat == 0x00 &&
36 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
37 return dasd_era_none;
38
39 /* examine the 24 byte sense data */
40 return dasd_era_recover;
41
42} /* END dasd_9336_erp_examine */
43
44/*
45 * Overrides for Emacs so that we follow Linus's tabbing style.
46 * Emacs will notice this stuff at the end of the file and automatically
47 * adjust the settings for this buffer only. This must remain at the end
48 * of the file.
49 * ---------------------------------------------------------------------------
50 * Local variables:
51 * c-indent-level: 4
52 * c-brace-imaginary-offset: 0
53 * c-brace-offset: -4
54 * c-argdecl-indent: 4
55 * c-label-offset: -4
56 * c-continued-statement-offset: 4
57 * c-continued-brace-offset: 0
58 * indent-tabs-mode: 1
59 * tab-width: 8
60 * End:
61 */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
new file mode 100644
index 000000000000..2a23b74faf3f
--- /dev/null
+++ b/drivers/s390/block/dasd_9343_erp.c
@@ -0,0 +1,22 @@
1/*
 * File...........: linux/drivers/s390/block/dasd_9343_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 * $Revision: 1.13 $
8 */
9
10#define PRINTK_HEADER "dasd_erp(9343)"
11
12#include "dasd_int.h"
13
14dasd_era_t
15dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
16{
17 if (irb->scsw.cstat == 0x00 &&
18 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
19 return dasd_era_none;
20
21 return dasd_era_recover;
22}
diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c
new file mode 100644
index 000000000000..ed1ab474c0c6
--- /dev/null
+++ b/drivers/s390/block/dasd_cmb.c
@@ -0,0 +1,145 @@
1/*
2 * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.6 $)
3 *
4 * Linux on zSeries Channel Measurement Facility support
5 * (dasd device driver interface)
6 *
7 * Copyright 2000,2003 IBM Corporation
8 *
9 * Author: Arnd Bergmann <arndb@de.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25#include <linux/init.h>
26#include <linux/ioctl32.h>
27#include <linux/module.h>
28#include <asm/ccwdev.h>
29#include <asm/cmb.h>
30
31#include "dasd_int.h"
32
33static int
34dasd_ioctl_cmf_enable(struct block_device *bdev, int no, long args)
35{
36 struct dasd_device *device;
37
38 device = bdev->bd_disk->private_data;
39 if (!device)
40 return -EINVAL;
41
42 return enable_cmf(device->cdev);
43}
44
45static int
46dasd_ioctl_cmf_disable(struct block_device *bdev, int no, long args)
47{
48 struct dasd_device *device;
49
50 device = bdev->bd_disk->private_data;
51 if (!device)
52 return -EINVAL;
53
54 return disable_cmf(device->cdev);
55}
56
57static int
58dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args)
59{
60 struct dasd_device *device;
61 struct cmbdata __user *udata;
62 struct cmbdata data;
63 size_t size;
64 int ret;
65
66 device = bdev->bd_disk->private_data;
67 if (!device)
68 return -EINVAL;
69 udata = (void __user *) args;
70 size = _IOC_SIZE(no);
71
72 if (!access_ok(VERIFY_WRITE, udata, size))
73 return -EFAULT;
74 ret = cmf_readall(device->cdev, &data);
75 if (ret)
76 return ret;
77 if (copy_to_user(udata, &data, min(size, sizeof(*udata))))
78 return -EFAULT;
79 return 0;
80}
81
82/* module initialization below here. dasd already provides a mechanism
83 * to dynamically register ioctl functions, so we simply use this. */
84static inline int
85ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler)
86{
87 int ret;
88 ret = dasd_ioctl_no_register(THIS_MODULE, no, handler);
89#ifdef CONFIG_COMPAT
90 if (ret)
91 return ret;
92
93 ret = register_ioctl32_conversion(no, NULL);
94 if (ret)
95 dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
96#endif
97 return ret;
98}
99
100static inline void
101ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler)
102{
103 dasd_ioctl_no_unregister(THIS_MODULE, no, handler);
104#ifdef CONFIG_COMPAT
105 unregister_ioctl32_conversion(no);
106#endif
107
108}
109
/* module exit: unregister all three cmf ioctls.  Also used by
 * dasd_cmf_init as rollback after a partial registration failure. */
static void
dasd_cmf_exit(void)
{
	ioctl_unreg(BIODASDCMFENABLE,  dasd_ioctl_cmf_enable);
	ioctl_unreg(BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
	ioctl_unreg(BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
}
117
118static int __init
119dasd_cmf_init(void)
120{
121 int ret;
122 ret = ioctl_reg (BIODASDCMFENABLE, dasd_ioctl_cmf_enable);
123 if (ret)
124 goto err;
125 ret = ioctl_reg (BIODASDCMFDISABLE, dasd_ioctl_cmf_disable);
126 if (ret)
127 goto err;
128 ret = ioctl_reg (BIODASDREADALLCMB, dasd_ioctl_readall_cmb);
129 if (ret)
130 goto err;
131
132 return 0;
133err:
134 dasd_cmf_exit();
135
136 return ret;
137}
138
/* module entry points and metadata */
module_init(dasd_cmf_init);
module_exit(dasd_cmf_exit);

MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility interface for dasd\n"
		   "Copyright 2003 IBM Corporation\n");
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
new file mode 100644
index 000000000000..ad1841a96c87
--- /dev/null
+++ b/drivers/s390/block/dasd_devmap.c
@@ -0,0 +1,772 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_devmap.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * Device mapping and dasd= parameter parsing functions. All devmap
11 * functions may not be called from interrupt context. In particular
12 * dasd_get_device is a no-no from interrupt context.
13 *
14 * $Revision: 1.37 $
15 */
16
17#include <linux/config.h>
18#include <linux/ctype.h>
19#include <linux/init.h>
20
21#include <asm/debug.h>
22#include <asm/uaccess.h>
23
24/* This is ugly... */
25#define PRINTK_HEADER "dasd_devmap:"
26
27#include "dasd_int.h"
28
/* slab cache for page sized dasd buffers; created on demand by the
 * "fixedbuffers" dasd= keyword (see dasd_parse_keyword) */
kmem_cache_t *dasd_page_cache;
EXPORT_SYMBOL(dasd_page_cache);
31
32/*
33 * dasd_devmap_t is used to store the features and the relation
34 * between device number and device index. To find a dasd_devmap_t
35 * that corresponds to a device number of a device index each
36 * dasd_devmap_t is added to two linked lists, one to search by
37 * the device number and one to search by the device index. As
38 * soon as big minor numbers are available the device index list
39 * can be removed since the device number will then be identical
40 * to the device index.
41 */
struct dasd_devmap {
	struct list_head list;		/* entry in a dasd_hashlists chain */
	char bus_id[BUS_ID_SIZE];	/* ccw device bus id */
	unsigned int devindex;		/* sequential device index */
	unsigned short features;	/* DASD_FEATURE_* flags */
	struct dasd_device *device;	/* attached device, NULL if none */
};
49
50/*
51 * Parameter parsing functions for dasd= parameter. The syntax is:
52 * <devno> : (0x)?[0-9a-fA-F]+
 * <busid>		: [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
54 * <feature> : ro
55 * <feature_list> : \(<feature>(:<feature>)*\)
56 * <devno-range> : <devno>(-<devno>)?<feature_list>?
57 * <busid-range> : <busid>(-<busid>)?<feature_list>?
58 * <devices> : <devno-range>|<busid-range>
59 * <dasd_module> : dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
60 *
61 * <dasd> : autodetect|probeonly|<devices>(,<devices>)*
62 */
63
int dasd_probeonly =  0;	/* is true, when probeonly mode is active */
int dasd_autodetect = 0;	/* is true, when autodetection is active */

/*
 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement
 * it is named 'dasd' to directly be filled by insmod with the comma separated
 * strings when running as a module.
 */
static char *dasd[256];
/*
 * Single spinlock to protect devmap structures and lists.
 */
static DEFINE_SPINLOCK(dasd_devmap_lock);

/*
 * Hash lists for devmap structures, 256 buckets indexed by
 * dasd_hash_busid().
 */
static struct list_head dasd_hashlists[256];
int dasd_max_devindex;		/* next free device index */

static struct dasd_devmap *dasd_add_busid(char *, int);
85
86static inline int
87dasd_hash_busid(char *bus_id)
88{
89 int hash, i;
90
91 hash = 0;
92 for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++)
93 hash += *bus_id;
94 return hash & 0xff;
95}
96
97#ifndef MODULE
98/*
99 * The parameter parsing functions for builtin-drivers are called
100 * before kmalloc works. Store the pointers to the parameters strings
101 * into dasd[] for later processing.
102 */
static int __init
dasd_call_setup(char *str)
{
	/* remember the parameter string for dasd_parse(); at most 256
	 * "dasd=" statements are kept, further ones are silently dropped */
	static int count = 0;

	if (count < 256)
		dasd[count++] = str;
	return 1;
}

__setup ("dasd=", dasd_call_setup);
114#endif /* #ifndef MODULE */
115
116/*
117 * Read a device busid/devno from a string.
118 */
static inline int
dasd_busid(char **str, int *id0, int *id1, int *devno)
{
	int val, old_style;

	/* check for leading '0x' */
	old_style = 0;
	if ((*str)[0] == '0' && (*str)[1] == 'x') {
		*str += 2;
		old_style = 1;
	}
	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
		return -EINVAL;
	val = simple_strtoul(*str, str, 16);
	/* "0x" prefix or no '.' separator -> old style plain devno */
	if (old_style || (*str)[0] != '.') {
		*id0 = *id1 = 0;
		if (val < 0 || val > 0xffff)
			return -EINVAL;
		*devno = val;
		return 0;
	}
	/* New style x.y.z busid */
	if (val < 0 || val > 0xff)
		return -EINVAL;
	*id0 = val;
	(*str)++;		/* skip the first '.' */
	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
		return -EINVAL;
	val = simple_strtoul(*str, str, 16);
	/* (*str)++[0] tests the current char for '.' AND advances */
	if (val < 0 || val > 0xff || (*str)++[0] != '.')
		return -EINVAL;
	*id1 = val;
	if (!isxdigit((*str)[0]))	/* We require at least one hex digit */
		return -EINVAL;
	val = simple_strtoul(*str, str, 16);
	if (val < 0 || val > 0xffff)
		return -EINVAL;
	*devno = val;
	return 0;
}
159
160/*
161 * Read colon separated list of dasd features. Currently there is
162 * only one: "ro" for read-only devices. The default feature set
163 * is empty (value 0).
164 */
165static inline int
166dasd_feature_list(char *str, char **endp)
167{
168 int features, len, rc;
169
170 rc = 0;
171 if (*str != '(') {
172 *endp = str;
173 return DASD_FEATURE_DEFAULT;
174 }
175 str++;
176 features = 0;
177
178 while (1) {
179 for (len = 0;
180 str[len] && str[len] != ':' && str[len] != ')'; len++);
181 if (len == 2 && !strncmp(str, "ro", 2))
182 features |= DASD_FEATURE_READONLY;
183 else if (len == 4 && !strncmp(str, "diag", 4))
184 features |= DASD_FEATURE_USEDIAG;
185 else {
186 MESSAGE(KERN_WARNING,
187 "unsupported feature: %*s, "
188 "ignoring setting", len, str);
189 rc = -EINVAL;
190 }
191 str += len;
192 if (*str != ':')
193 break;
194 str++;
195 }
196 if (*str != ')') {
197 MESSAGE(KERN_WARNING, "%s",
198 "missing ')' in dasd parameter string\n");
199 rc = -EINVAL;
200 } else
201 str++;
202 *endp = str;
203 if (rc != 0)
204 return rc;
205 return features;
206}
207
208/*
209 * Try to match the first element on the comma separated parse string
 * with one of the known keywords. If a keyword is found, take the appropriate
211 * action and return a pointer to the residual string. If the first element
212 * could not be matched to any keyword then return an error code.
213 */
static char *
dasd_parse_keyword( char *parsestring ) {

	char *nextcomma, *residual_str;
	int length;

	/* isolate the current comma separated element */
	nextcomma = strchr(parsestring,',');
	if (nextcomma) {
		length = nextcomma - parsestring;
		residual_str = nextcomma + 1;
	} else {
		length = strlen(parsestring);
		residual_str = parsestring + length;
	}
	/* NOTE(review): only 'length' characters are compared below, so
	 * any prefix of a keyword (e.g. "auto") matches it as well -
	 * confirm whether that is intended */
	if (strncmp ("autodetect", parsestring, length) == 0) {
		dasd_autodetect = 1;
		MESSAGE (KERN_INFO, "%s",
			 "turning to autodetection mode");
		return residual_str;
	}
	if (strncmp ("probeonly", parsestring, length) == 0) {
		dasd_probeonly = 1;
		MESSAGE(KERN_INFO, "%s",
			"turning to probeonly mode");
		return residual_str;
	}
	if (strncmp ("fixedbuffers", parsestring, length) == 0) {
		/* create the page cache only once */
		if (dasd_page_cache)
			return residual_str;
		dasd_page_cache =
			kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0,
					  SLAB_CACHE_DMA, NULL, NULL );
		if (!dasd_page_cache)
			MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
				"fixed buffer mode disabled.");
		else
			MESSAGE (KERN_INFO, "%s",
				 "turning on fixed buffer mode");
		return residual_str;
	}
	/* not a keyword - let the caller try it as a device range */
	return ERR_PTR(-EINVAL);
}
256
257/*
258 * Try to interprete the first element on the comma separated parse string
259 * as a device number or a range of devices. If the interpretation is
260 * successfull, create the matching dasd_devmap entries and return a pointer
261 * to the residual string.
262 * If interpretation fails or in case of an error, return an error code.
263 */
static char *
dasd_parse_range( char *parsestring ) {

	struct dasd_devmap *devmap;
	int from, from_id0, from_id1;
	int to, to_id0, to_id1;
	int features, rc;
	char bus_id[BUS_ID_SIZE+1], *str;

	str = parsestring;
	rc = dasd_busid(&str, &from_id0, &from_id1, &from);
	if (rc == 0) {
		/* single device unless a '-<busid>' range end follows */
		to = from;
		to_id0 = from_id0;
		to_id1 = from_id1;
		if (*str == '-') {
			str++;
			rc = dasd_busid(&str, &to_id0, &to_id1, &to);
		}
	}
	/* both ends must share id0/id1 and may not be reversed */
	if (rc == 0 &&
	    (from_id0 != to_id0 || from_id1 != to_id1 || from > to))
		rc = -EINVAL;
	if (rc) {
		MESSAGE(KERN_ERR, "Invalid device range %s", parsestring);
		return ERR_PTR(rc);
	}
	/* an optional feature list applies to every device in range */
	features = dasd_feature_list(str, &str);
	if (features < 0)
		return ERR_PTR(-EINVAL);
	/* create a devmap for every device of the range */
	while (from <= to) {
		sprintf(bus_id, "%01x.%01x.%04x",
			from_id0, from_id1, from++);
		devmap = dasd_add_busid(bus_id, features);
		if (IS_ERR(devmap))
			return (char *)devmap;
	}
	if (*str == ',')
		return str + 1;
	if (*str == '\0')
		return str;
	MESSAGE(KERN_WARNING,
		"junk at end of dasd parameter string: %s\n", str);
	return ERR_PTR(-EINVAL);
}
309
/* parse one comma separated element: keywords first, then ranges */
static inline char *
dasd_parse_next_element( char *parsestring ) {
	char *rest;

	rest = dasd_parse_keyword(parsestring);
	if (IS_ERR(rest))
		rest = dasd_parse_range(parsestring);
	return rest;
}
319
320/*
321 * Parse parameters stored in dasd[]
322 * The 'dasd=...' parameter allows to specify a comma separated list of
323 * keywords and device ranges. When the dasd driver is build into the kernel,
324 * the complete list will be stored as one element of the dasd[] array.
325 * When the dasd driver is build as a module, then the list is broken into
326 * it's elements and each dasd[] entry contains one element.
327 */
328int
329dasd_parse(void)
330{
331 int rc, i;
332 char *parsestring;
333
334 rc = 0;
335 for (i = 0; i < 256; i++) {
336 if (dasd[i] == NULL)
337 break;
338 parsestring = dasd[i];
339 /* loop over the comma separated list in the parsestring */
340 while (*parsestring) {
341 parsestring = dasd_parse_next_element(parsestring);
342 if(IS_ERR(parsestring)) {
343 rc = PTR_ERR(parsestring);
344 break;
345 }
346 }
347 if (rc) {
348 DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
349 break;
350 }
351 }
352 return rc;
353}
354
355/*
356 * Add a devmap for the device specified by busid. It is possible that
357 * the devmap already exists (dasd= parameter). The order of the devices
358 * added through this function will define the kdevs for the individual
359 * devices.
360 */
361static struct dasd_devmap *
362dasd_add_busid(char *bus_id, int features)
363{
364 struct dasd_devmap *devmap, *new, *tmp;
365 int hash;
366
367 new = (struct dasd_devmap *)
368 kmalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
369 if (!new)
370 return ERR_PTR(-ENOMEM);
371 spin_lock(&dasd_devmap_lock);
372 devmap = 0;
373 hash = dasd_hash_busid(bus_id);
374 list_for_each_entry(tmp, &dasd_hashlists[hash], list)
375 if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
376 devmap = tmp;
377 break;
378 }
379 if (!devmap) {
380 /* This bus_id is new. */
381 new->devindex = dasd_max_devindex++;
382 strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
383 new->features = features;
384 new->device = 0;
385 list_add(&new->list, &dasd_hashlists[hash]);
386 devmap = new;
387 new = 0;
388 }
389 spin_unlock(&dasd_devmap_lock);
390 if (new)
391 kfree(new);
392 return devmap;
393}
394
395/*
396 * Find devmap for device with given bus_id.
397 */
398static struct dasd_devmap *
399dasd_find_busid(char *bus_id)
400{
401 struct dasd_devmap *devmap, *tmp;
402 int hash;
403
404 spin_lock(&dasd_devmap_lock);
405 devmap = ERR_PTR(-ENODEV);
406 hash = dasd_hash_busid(bus_id);
407 list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
408 if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
409 devmap = tmp;
410 break;
411 }
412 }
413 spin_unlock(&dasd_devmap_lock);
414 return devmap;
415}
416
417/*
418 * Check if busid has been added to the list of dasd ranges.
419 */
420int
421dasd_busid_known(char *bus_id)
422{
423 return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
424}
425
426/*
427 * Forget all about the device numbers added so far.
428 * This may only be called at module unload or system shutdown.
429 */
430static void
431dasd_forget_ranges(void)
432{
433 struct dasd_devmap *devmap, *n;
434 int i;
435
436 spin_lock(&dasd_devmap_lock);
437 for (i = 0; i < 256; i++) {
438 list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
439 if (devmap->device != NULL)
440 BUG();
441 list_del(&devmap->list);
442 kfree(devmap);
443 }
444 }
445 spin_unlock(&dasd_devmap_lock);
446}
447
448/*
449 * Find the device struct by its device index.
450 */
451struct dasd_device *
452dasd_device_from_devindex(int devindex)
453{
454 struct dasd_devmap *devmap, *tmp;
455 struct dasd_device *device;
456 int i;
457
458 spin_lock(&dasd_devmap_lock);
459 devmap = 0;
460 for (i = 0; (i < 256) && !devmap; i++)
461 list_for_each_entry(tmp, &dasd_hashlists[i], list)
462 if (tmp->devindex == devindex) {
463 /* Found the devmap for the device. */
464 devmap = tmp;
465 break;
466 }
467 if (devmap && devmap->device) {
468 device = devmap->device;
469 dasd_get_device(device);
470 } else
471 device = ERR_PTR(-ENODEV);
472 spin_unlock(&dasd_devmap_lock);
473 return device;
474}
475
476/*
477 * Return devmap for cdev. If no devmap exists yet, create one and
478 * connect it to the cdev.
479 */
480static struct dasd_devmap *
481dasd_devmap_from_cdev(struct ccw_device *cdev)
482{
483 struct dasd_devmap *devmap;
484
485 devmap = dasd_find_busid(cdev->dev.bus_id);
486 if (IS_ERR(devmap))
487 devmap = dasd_add_busid(cdev->dev.bus_id,
488 DASD_FEATURE_DEFAULT);
489 return devmap;
490}
491
492/*
493 * Create a dasd device structure for cdev.
494 */
495struct dasd_device *
496dasd_create_device(struct ccw_device *cdev)
497{
498 struct dasd_devmap *devmap;
499 struct dasd_device *device;
500 int rc;
501
502 devmap = dasd_devmap_from_cdev(cdev);
503 if (IS_ERR(devmap))
504 return (void *) devmap;
505 cdev->dev.driver_data = devmap;
506
507 device = dasd_alloc_device();
508 if (IS_ERR(device))
509 return device;
510 atomic_set(&device->ref_count, 2);
511
512 spin_lock(&dasd_devmap_lock);
513 if (!devmap->device) {
514 devmap->device = device;
515 device->devindex = devmap->devindex;
516 if (devmap->features & DASD_FEATURE_READONLY)
517 set_bit(DASD_FLAG_RO, &device->flags);
518 else
519 clear_bit(DASD_FLAG_RO, &device->flags);
520 if (devmap->features & DASD_FEATURE_USEDIAG)
521 set_bit(DASD_FLAG_USE_DIAG, &device->flags);
522 else
523 clear_bit(DASD_FLAG_USE_DIAG, &device->flags);
524 get_device(&cdev->dev);
525 device->cdev = cdev;
526 rc = 0;
527 } else
528 /* Someone else was faster. */
529 rc = -EBUSY;
530 spin_unlock(&dasd_devmap_lock);
531
532 if (rc) {
533 dasd_free_device(device);
534 return ERR_PTR(rc);
535 }
536 return device;
537}
538
539/*
540 * Wait queue for dasd_delete_device waits.
541 */
542static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
543
544/*
545 * Remove a dasd device structure. The passed referenced
546 * is destroyed.
547 */
548void
549dasd_delete_device(struct dasd_device *device)
550{
551 struct ccw_device *cdev;
552 struct dasd_devmap *devmap;
553
554 /* First remove device pointer from devmap. */
555 devmap = dasd_find_busid(device->cdev->dev.bus_id);
556 if (IS_ERR(devmap))
557 BUG();
558 spin_lock(&dasd_devmap_lock);
559 if (devmap->device != device) {
560 spin_unlock(&dasd_devmap_lock);
561 dasd_put_device(device);
562 return;
563 }
564 devmap->device = NULL;
565 spin_unlock(&dasd_devmap_lock);
566
567 /* Drop ref_count by 2, one for the devmap reference and
568 * one for the passed reference. */
569 atomic_sub(2, &device->ref_count);
570
571 /* Wait for reference counter to drop to zero. */
572 wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
573
574 /* Disconnect dasd_device structure from ccw_device structure. */
575 cdev = device->cdev;
576 device->cdev = NULL;
577
578 /* Disconnect dasd_devmap structure from ccw_device structure. */
579 cdev->dev.driver_data = NULL;
580
581 /* Put ccw_device structure. */
582 put_device(&cdev->dev);
583
584 /* Now the device structure can be freed. */
585 dasd_free_device(device);
586}
587
588/*
589 * Reference counter dropped to zero. Wake up waiter
590 * in dasd_delete_device.
591 */
592void
593dasd_put_device_wake(struct dasd_device *device)
594{
595 wake_up(&dasd_delete_wq);
596}
597
598/*
599 * Return dasd_device structure associated with cdev.
600 */
601struct dasd_device *
602dasd_device_from_cdev(struct ccw_device *cdev)
603{
604 struct dasd_devmap *devmap;
605 struct dasd_device *device;
606
607 device = ERR_PTR(-ENODEV);
608 spin_lock(&dasd_devmap_lock);
609 devmap = cdev->dev.driver_data;
610 if (devmap && devmap->device) {
611 device = devmap->device;
612 dasd_get_device(device);
613 }
614 spin_unlock(&dasd_devmap_lock);
615 return device;
616}
617
618/*
619 * SECTION: files in sysfs
620 */
621
622/*
623 * readonly controls the readonly status of a dasd
624 */
625static ssize_t
626dasd_ro_show(struct device *dev, char *buf)
627{
628 struct dasd_devmap *devmap;
629 int ro_flag;
630
631 devmap = dasd_find_busid(dev->bus_id);
632 if (!IS_ERR(devmap))
633 ro_flag = (devmap->features & DASD_FEATURE_READONLY) != 0;
634 else
635 ro_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_READONLY) != 0;
636 return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
637}
638
/*
 * Set the readonly feature: a buf starting with '1' sets it, anything
 * else clears it. Creates the devmap on demand; when a device is
 * connected, the flag is propagated to the gendisk and device flags.
 */
static ssize_t
dasd_ro_store(struct device *dev, const char *buf, size_t count)
{
	struct dasd_devmap *devmap;
	int ro_flag;

	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
	if (IS_ERR(devmap))
		return PTR_ERR(devmap);
	/* NOTE(review): assumes buf holds at least one byte - confirm
	 * against the sysfs store contract. */
	ro_flag = buf[0] == '1';
	spin_lock(&dasd_devmap_lock);
	if (ro_flag)
		devmap->features |= DASD_FEATURE_READONLY;
	else
		devmap->features &= ~DASD_FEATURE_READONLY;
	if (devmap->device) {
		if (devmap->device->gdp)
			set_disk_ro(devmap->device->gdp, ro_flag);
		if (ro_flag)
			set_bit(DASD_FLAG_RO, &devmap->device->flags);
		else
			clear_bit(DASD_FLAG_RO, &devmap->device->flags);
	}
	spin_unlock(&dasd_devmap_lock);
	return count;
}
665
666static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
667
668/*
669 * use_diag controls whether the driver should use diag rather than ssch
670 * to talk to the device
671 */
672static ssize_t
673dasd_use_diag_show(struct device *dev, char *buf)
674{
675 struct dasd_devmap *devmap;
676 int use_diag;
677
678 devmap = dasd_find_busid(dev->bus_id);
679 if (!IS_ERR(devmap))
680 use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
681 else
682 use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
683 return sprintf(buf, use_diag ? "1\n" : "0\n");
684}
685
686static ssize_t
687dasd_use_diag_store(struct device *dev, const char *buf, size_t count)
688{
689 struct dasd_devmap *devmap;
690 ssize_t rc;
691 int use_diag;
692
693 devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
694 if (IS_ERR(devmap))
695 return PTR_ERR(devmap);
696 use_diag = buf[0] == '1';
697 spin_lock(&dasd_devmap_lock);
698 /* Changing diag discipline flag is only allowed in offline state. */
699 rc = count;
700 if (!devmap->device) {
701 if (use_diag)
702 devmap->features |= DASD_FEATURE_USEDIAG;
703 else
704 devmap->features &= ~DASD_FEATURE_USEDIAG;
705 } else
706 rc = -EPERM;
707 spin_unlock(&dasd_devmap_lock);
708 return rc;
709}
710
711static
712DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
713
714static ssize_t
715dasd_discipline_show(struct device *dev, char *buf)
716{
717 struct dasd_devmap *devmap;
718 char *dname;
719
720 spin_lock(&dasd_devmap_lock);
721 dname = "none";
722 devmap = dev->driver_data;
723 if (devmap && devmap->device && devmap->device->discipline)
724 dname = devmap->device->discipline->name;
725 spin_unlock(&dasd_devmap_lock);
726 return snprintf(buf, PAGE_SIZE, "%s\n", dname);
727}
728
729static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
730
/* Attributes exported for each dasd below its ccw device sysfs dir. */
static struct attribute * dasd_attrs[] = {
	&dev_attr_readonly.attr,
	&dev_attr_discipline.attr,
	&dev_attr_use_diag.attr,
	NULL,
};

static struct attribute_group dasd_attr_group = {
	.attrs = dasd_attrs,
};
741
/*
 * Create the dasd attribute group (readonly, discipline, use_diag)
 * below the given ccw device. Returns the sysfs_create_group result.
 */
int
dasd_add_sysfs_files(struct ccw_device *cdev)
{
	return sysfs_create_group(&cdev->dev.kobj, &dasd_attr_group);
}
747
/*
 * Remove the dasd attribute group from the given ccw device again.
 */
void
dasd_remove_sysfs_files(struct ccw_device *cdev)
{
	sysfs_remove_group(&cdev->dev.kobj, &dasd_attr_group);
}
753
754
755int
756dasd_devmap_init(void)
757{
758 int i;
759
760 /* Initialize devmap structures. */
761 dasd_max_devindex = 0;
762 for (i = 0; i < 256; i++)
763 INIT_LIST_HEAD(&dasd_hashlists[i]);
764 return 0;
765
766}
767
/*
 * Tear down all devmaps again; called on module unload.
 */
void
dasd_devmap_exit(void)
{
	dasd_forget_ranges();
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
new file mode 100644
index 000000000000..127699830fa1
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.c
@@ -0,0 +1,541 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_diag.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Based on.......: linux/drivers/s390/block/mdisk.c
5 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.42 $
10 */
11
12#include <linux/config.h>
13#include <linux/stddef.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/hdreg.h> /* HDIO_GETGEO */
17#include <linux/bio.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/dasd.h>
22#include <asm/debug.h>
23#include <asm/ebcdic.h>
24#include <asm/io.h>
25#include <asm/s390_ext.h>
26#include <asm/todclk.h>
27
28#include "dasd_int.h"
29#include "dasd_diag.h"
30
31#ifdef PRINTK_HEADER
32#undef PRINTK_HEADER
33#endif /* PRINTK_HEADER */
34#define PRINTK_HEADER "dasd(diag):"
35
36MODULE_LICENSE("GPL");
37
38struct dasd_discipline dasd_diag_discipline;
39
/* Per-device private data for the DIAG discipline. */
struct dasd_diag_private {
	struct dasd_diag_characteristics rdc_data;	/* diag 210 result */
	struct dasd_diag_rw_io iob;	/* I/O block for diag 250 R/W calls */
	struct dasd_diag_init_io iib;	/* block for INIT_BIO/TERM_BIO calls */
	unsigned int pt_block;	/* position of the volume label block */
};

/* Request payload: a counted array of diag bios, one per block
 * (GNU zero-length array; sized at allocation time). */
struct dasd_diag_req {
	int block_count;
	struct dasd_diag_bio bio[0];
};
51
/*
 * Issue a diagnose 250 call with the given (physically addressed)
 * I/O control block. rc is preloaded with 3 so that a faulting diag
 * instruction - caught via the __ex_table entry - reports failure.
 * Otherwise the condition code is extracted from the PSW; it is OR'ed
 * with register 1, which presumably carries the diag return code
 * (NOTE(review): confirm against the DIAG X'250' specification).
 */
static __inline__ int
dia250(void *iob, int cmd)
{
	int rc;

	__asm__ __volatile__(" lhi %0,3\n"
			     " lr 0,%2\n"
			     " diag 0,%1,0x250\n"
			     "0: ipm %0\n"
			     " srl %0,28\n"
			     " or %0,1\n"
			     "1:\n"
#ifndef CONFIG_ARCH_S390X
			     ".section __ex_table,\"a\"\n"
			     " .align 4\n"
			     " .long 0b,1b\n"
			     ".previous\n"
#else
			     ".section __ex_table,\"a\"\n"
			     " .align 8\n"
			     " .quad 0b,1b\n"
			     ".previous\n"
#endif
			     : "=&d" (rc)
			     : "d" (cmd), "d" ((void *) __pa(iob))
			     : "0", "1", "cc");
	return rc;
}
80
81static __inline__ int
82mdsk_init_io(struct dasd_device * device, int blocksize, int offset, int size)
83{
84 struct dasd_diag_private *private;
85 struct dasd_diag_init_io *iib;
86 int rc;
87
88 private = (struct dasd_diag_private *) device->private;
89 iib = &private->iib;
90 memset(iib, 0, sizeof (struct dasd_diag_init_io));
91
92 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
93 iib->block_size = blocksize;
94 iib->offset = offset;
95 iib->start_block = 0;
96 iib->end_block = size;
97
98 rc = dia250(iib, INIT_BIO);
99
100 return rc & 3;
101}
102
103static __inline__ int
104mdsk_term_io(struct dasd_device * device)
105{
106 struct dasd_diag_private *private;
107 struct dasd_diag_init_io *iib;
108 int rc;
109
110 private = (struct dasd_diag_private *) device->private;
111 iib = &private->iib;
112 memset(iib, 0, sizeof (struct dasd_diag_init_io));
113 iib->dev_nr = _ccw_device_get_device_number(device->cdev);
114 rc = dia250(iib, TERM_BIO);
115 return rc & 3;
116}
117
/*
 * Start a request via an asynchronous diag 250 R/W call.
 * Returns 0 when the request was started or completed synchronously;
 * on failure the cqr status is set to DASD_CQR_ERROR and the nonzero
 * condition code is returned.
 */
static int
dasd_start_diag(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device;
	struct dasd_diag_private *private;
	struct dasd_diag_req *dreq;
	int rc;

	device = cqr->device;
	private = (struct dasd_diag_private *) device->private;
	dreq = (struct dasd_diag_req *) cqr->data;

	private->iob.dev_nr = _ccw_device_get_device_number(device->cdev);
	private->iob.key = 0;
	private->iob.flags = 2;	/* do asynchronous io */
	private->iob.block_count = dreq->block_count;
	/* The cqr pointer comes back in the external interrupt
	 * parameters, see dasd_ext_handler. */
	private->iob.interrupt_params = (u32)(addr_t) cqr;
	private->iob.bio_list = __pa(dreq->bio);

	cqr->startclk = get_clock();

	rc = dia250(&private->iob, RW_BIO);
	if (rc > 8) {
		/* The diag call itself failed. */
		DEV_MESSAGE(KERN_WARNING, device, "dia250 returned CC %d", rc);
		cqr->status = DASD_CQR_ERROR;
	} else if (rc == 0) {
		/* Completed synchronously - no interrupt will follow. */
		cqr->status = DASD_CQR_DONE;
		dasd_schedule_bh(device);
	} else {
		/* Started; completion arrives as external interrupt. */
		cqr->status = DASD_CQR_IN_IO;
		rc = 0;
	}
	return rc;
}
152
/*
 * External interrupt handler for diag 250 completion (registered for
 * external interrupt code 0x2603). Completes the request identified
 * by the interrupt parameters and tries to start the next queued
 * request right away (fast path).
 */
static void
dasd_ext_handler(struct pt_regs *regs, __u16 code)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long expires;
	unsigned long flags;
	char status;
	int ip;

	/*
	 * Get the external interruption subcode. VM stores
	 * this in the 'cpu address' field associated with
	 * the external interrupt. For diag 250 the subcode
	 * needs to be 3.
	 */
	if ((S390_lowcore.cpu_addr & 0xff00) != 0x0300)
		return;
	/* Byte 5 of the ext_params carries the diag 250 I/O status. */
	status = *((char *) &S390_lowcore.ext_params + 5);
	ip = S390_lowcore.ext_params;

	if (!ip) {		/* no intparm: unsolicited interrupt */
		MESSAGE(KERN_DEBUG, "%s", "caught unsolicited interrupt");
		return;
	}
	/* The intparm is the cqr pointer stored by dasd_start_diag. */
	cqr = (struct dasd_ccw_req *)(addr_t) ip;
	device = (struct dasd_device *) cqr->device;
	/* Sanity check: the request must belong to this discipline. */
	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " magic number of dasd_ccw_req 0x%08X doesn't"
			    " match discipline 0x%08X",
			    cqr->magic, *(int *) (&device->discipline->name));
		return;
	}

	/* get irq lock to modify request queue */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);

	cqr->stopclk = get_clock();

	expires = 0;
	if (status == 0) {
		cqr->status = DASD_CQR_DONE;
		/* Start first request on queue if possible -> fast_io. */
		if (!list_empty(&device->ccw_queue)) {
			next = list_entry(device->ccw_queue.next,
					  struct dasd_ccw_req, list);
			if (next->status == DASD_CQR_QUEUED) {
				if (dasd_start_diag(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_WARNING, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else
		/* Nonzero status byte: the I/O failed. */
		cqr->status = DASD_CQR_FAILED;

	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);

	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
220
/*
 * Check whether the device can be driven via diag 250: query its
 * characteristics with diag 210, locate the volume label by probing
 * block sizes, and derive block size and capacity from the label.
 * Returns 0 on success or a negative error code.
 */
static int
dasd_diag_check_device(struct dasd_device *device)
{
	struct dasd_diag_private *private;
	struct dasd_diag_characteristics *rdc_data;
	struct dasd_diag_bio bio;
	long *label;
	int sb, bsize;
	int rc;

	private = (struct dasd_diag_private *) device->private;
	if (private == NULL) {
		/* First call for this device: allocate private data. */
		private = kmalloc(sizeof(struct dasd_diag_private),GFP_KERNEL);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private data");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev);
	rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);

	rc = diag210((struct diag210 *) rdc_data);
	if (rc)
		return -ENOTSUPP;

	/* Figure out position of label block */
	switch (private->rdc_data.vdev_class) {
	case DEV_CLASS_FBA:
		private->pt_block = 1;
		break;
	case DEV_CLASS_ECKD:
		private->pt_block = 2;
		break;
	default:
		/* Only FBA and ECKD minidisks are supported. */
		return -ENOTSUPP;
	}

	DBF_DEV_EVENT(DBF_INFO, device,
		      "%04X: %04X on real %04X/%02X",
		      rdc_data->dev_nr,
		      rdc_data->vdev_type,
		      rdc_data->rdev_type, rdc_data->rdev_model);

	/* terminate all outstanding operations */
	mdsk_term_io(device);

	/* figure out blocksize of device */
	label = (long *) get_zeroed_page(GFP_KERNEL);
	if (label == NULL)  {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "No memory to allocate initialization request");
		return -ENOMEM;
	}
	/* try all sizes - needed for ECKD devices */
	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
		mdsk_init_io(device, bsize, 0, 64);
		memset(&bio, 0, sizeof (struct dasd_diag_bio));
		bio.type = MDSK_READ_REQ;
		/* Read the block after pt_block: the volume label. */
		bio.block_number = private->pt_block + 1;
		bio.buffer = __pa(label);
		memset(&private->iob, 0, sizeof (struct dasd_diag_rw_io));
		private->iob.dev_nr = rdc_data->dev_nr;
		private->iob.key = 0;
		private->iob.flags = 0;	/* do synchronous io */
		private->iob.block_count = 1;
		private->iob.interrupt_params = 0;
		private->iob.bio_list = __pa(&bio);
		if (dia250(&private->iob, RW_BIO) == 0)
			break;
		mdsk_term_io(device);
	}
	/* 0xc3d4e2f1 is the string "CMS1" in EBCDIC. */
	if (bsize <= PAGE_SIZE && label[0] == 0xc3d4e2f1) {
		/* get formatted blocksize from label block */
		bsize = (int) label[3];
		device->blocks = label[7];
		device->bp_block = bsize;
		device->s2b_shift = 0;	/* bits to shift 512 to get a block */
		for (sb = 512; sb < bsize; sb = sb << 1)
			device->s2b_shift++;

		DEV_MESSAGE(KERN_INFO, device,
			    "capacity (%dkB blks): %ldkB",
			    (device->bp_block >> 10),
			    (device->blocks << device->s2b_shift) >> 1);
		rc = 0;
	} else {
		if (bsize > PAGE_SIZE)
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "DIAG access failed");
		else
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "volume is not CMS formatted");
		rc = -EMEDIUMTYPE;
	}
	free_page((long) label);
	return rc;
}
322
323static int
324dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
325{
326 if (dasd_check_blocksize(device->bp_block) != 0)
327 return -EINVAL;
328 geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
329 geo->heads = 16;
330 geo->sectors = 128 >> device->s2b_shift;
331 return 0;
332}
333
/*
 * DIAG I/O provides no sense data, so every error is treated as fatal.
 */
static dasd_era_t
dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
{
	return dasd_era_fatal;
}
339
/*
 * Use the default error recovery action for DIAG requests.
 */
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}
345
/*
 * Use the default error recovery postaction for DIAG requests.
 */
static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
351
/*
 * Translate a block layer request into a dasd ccw request carrying a
 * list of diag bios, one per device block. Returns the new request
 * or an ERR_PTR (-EINVAL for malformed requests).
 */
static struct dasd_ccw_req *
dasd_diag_build_cp(struct dasd_device * device, struct request *req)
{
	struct dasd_ccw_req *cqr;
	struct dasd_diag_req *dreq;
	struct dasd_diag_bio *dbio;
	struct bio *bio;
	struct bio_vec *bv;
	char *dst;
	int count, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned blksize, off;
	unsigned char rw_cmd;
	int i;

	if (rq_data_dir(req) == READ)
		rw_cmd = MDSK_READ_REQ;
	else if (rq_data_dir(req) == WRITE)
		rw_cmd = MDSK_WRITE_REQ;
	else
		return ERR_PTR(-EINVAL);
	blksize = device->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = req->sector >> device->s2b_shift;
	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	rq_for_each_bio(bio, req) {
		bio_for_each_segment(bv, bio, i) {
			if (bv->bv_len & (blksize - 1))
				/* Fba can only do full blocks. */
				return ERR_PTR(-EINVAL);
			/* bv_len is in 512-byte sectors' worth of bytes;
			 * convert to device blocks. */
			count += bv->bv_len >> (device->s2b_shift + 9);
		}
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* Build the request */
	datasize = sizeof(struct dasd_diag_req) +
		count*sizeof(struct dasd_diag_bio);
	cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
				   datasize, device);
	if (IS_ERR(cqr))
		return cqr;

	dreq = (struct dasd_diag_req *) cqr->data;
	dreq->block_count = count;
	dbio = dreq->bio;
	recid = first_rec;
	/* Second pass: fill one diag bio per block of each segment. */
	rq_for_each_bio(bio, req) {
		bio_for_each_segment(bv, bio, i) {
			dst = page_address(bv->bv_page) + bv->bv_offset;
			for (off = 0; off < bv->bv_len; off += blksize) {
				memset(dbio, 0, sizeof (struct dasd_diag_bio));
				dbio->type = rw_cmd;
				/* recid + 1: diag block numbers appear to
				 * be 1-based (see dasd_diag_check_device). */
				dbio->block_number = recid + 1;
				dbio->buffer = __pa(dst);
				dbio++;
				dst += blksize;
				recid++;
			}
		}
	}
	cqr->buildclk = get_clock();
	cqr->device = device;
	cqr->expires = 50 * HZ;	/* 50 seconds */
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
422
423static int
424dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
425{
426 int status;
427
428 status = cqr->status == DASD_CQR_DONE;
429 dasd_sfree_request(cqr, cqr->device);
430 return status;
431}
432
433static int
434dasd_diag_fill_info(struct dasd_device * device,
435 struct dasd_information2_t * info)
436{
437 struct dasd_diag_private *private;
438
439 private = (struct dasd_diag_private *) device->private;
440 info->label_block = private->pt_block;
441 info->FBA_layout = 1;
442 info->format = DASD_FORMAT_LDL;
443 info->characteristics_size = sizeof (struct dasd_diag_characteristics);
444 memcpy(info->characteristics,
445 &((struct dasd_diag_private *) device->private)->rdc_data,
446 sizeof (struct dasd_diag_characteristics));
447 info->confdata_size = 0;
448 return 0;
449}
450
/*
 * DIAG I/O has no sense data to dump; just log that fact.
 */
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		     struct irb *stat)
{
	DEV_MESSAGE(KERN_ERR, device, "%s",
		    "dump sense not available for DIAG data");
}
458
459/*
460 * max_blocks is dependent on the amount of storage that is available
461 * in the static io buffer for each device. Currently each device has
462 * 8192 bytes (=2 pages). dasd diag is only relevant for 31 bit.
463 * The struct dasd_ccw_req has 96 bytes, the struct dasd_diag_req has
464 * 8 bytes and the struct dasd_diag_bio for each block has 16 bytes.
465 * That makes:
466 * (8192 - 96 - 8) / 16 = 505.5 blocks at maximum.
467 * We want to fit two into the available memory so that we can immediately
468 * start the next request if one finishes off. That makes 252.75 blocks
469 * for one request. Give a little safety and the result is 240.
470 */
471struct dasd_discipline dasd_diag_discipline = {
472 .owner = THIS_MODULE,
473 .name = "DIAG",
474 .ebcname = "DIAG",
475 .max_blocks = 240,
476 .check_device = dasd_diag_check_device,
477 .fill_geometry = dasd_diag_fill_geometry,
478 .start_IO = dasd_start_diag,
479 .examine_error = dasd_diag_examine_error,
480 .erp_action = dasd_diag_erp_action,
481 .erp_postaction = dasd_diag_erp_postaction,
482 .build_cp = dasd_diag_build_cp,
483 .free_cp = dasd_diag_free_cp,
484 .dump_sense = dasd_diag_dump_sense,
485 .fill_info = dasd_diag_fill_info,
486};
487
/*
 * Register the DIAG discipline. Only meaningful when running as a VM
 * guest; hooks the handler for external interrupt code 0x2603, which
 * delivers diag 250 completions (see dasd_ext_handler).
 */
static int __init
dasd_diag_init(void)
{
	if (!MACHINE_IS_VM) {
		MESSAGE_LOG(KERN_INFO,
			    "Machine is not VM: %s "
			    "discipline not initializing",
			    dasd_diag_discipline.name);
		return -EINVAL;
	}
	/* The ebcname is compared against request magics in EBCDIC. */
	ASCEBC(dasd_diag_discipline.ebcname, 4);

	/* NOTE(review): control register 0 bit 9 presumably enables the
	 * external interrupt subclass for 0x2603 - confirm in POP. */
	ctl_set_bit(0, 9);
	register_external_interrupt(0x2603, dasd_ext_handler);
	dasd_diag_discipline_pointer = &dasd_diag_discipline;
	return 0;
}
505
/*
 * Unregister the DIAG discipline again: detach the external interrupt
 * handler and disable the interrupt subclass enabled at init time.
 */
static void __exit
dasd_diag_cleanup(void)
{
	if (!MACHINE_IS_VM) {
		MESSAGE_LOG(KERN_INFO,
			    "Machine is not VM: %s "
			    "discipline not cleaned",
			    dasd_diag_discipline.name);
		return;
	}
	unregister_external_interrupt(0x2603, dasd_ext_handler);
	ctl_clear_bit(0, 9);
	dasd_diag_discipline_pointer = NULL;
}
520
521module_init(dasd_diag_init);
522module_exit(dasd_diag_cleanup);
523
524/*
525 * Overrides for Emacs so that we follow Linus's tabbing style.
526 * Emacs will notice this stuff at the end of the file and automatically
527 * adjust the settings for this buffer only. This must remain at the end
528 * of the file.
529 * ---------------------------------------------------------------------------
530 * Local variables:
531 * c-indent-level: 4
532 * c-brace-imaginary-offset: 0
533 * c-brace-offset: -4
534 * c-argdecl-indent: 4
535 * c-label-offset: -4
536 * c-continued-statement-offset: 4
537 * c-continued-brace-offset: 0
538 * indent-tabs-mode: 1
539 * tab-width: 8
540 * End:
541 */
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
new file mode 100644
index 000000000000..a0c38e303979
--- /dev/null
+++ b/drivers/s390/block/dasd_diag.h
@@ -0,0 +1,66 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_diag.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Based on.......: linux/drivers/s390/block/mdisk.h
5 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.6 $
10 */
11
12#define MDSK_WRITE_REQ 0x01
13#define MDSK_READ_REQ 0x02
14
15#define INIT_BIO 0x00
16#define RW_BIO 0x01
17#define TERM_BIO 0x02
18
19#define DEV_CLASS_FBA 0x01
20#define DEV_CLASS_ECKD 0x04
21
/* Device characteristics as returned by diag 210 for the minidisk.
 * Layout is a VM interface - do not change. */
struct dasd_diag_characteristics {
	u16 dev_nr;		/* virtual device number */
	u16 rdc_len;		/* length of this structure */
	u8 vdev_class;		/* DEV_CLASS_FBA / DEV_CLASS_ECKD / ... */
	u8 vdev_type;
	u8 vdev_status;
	u8 vdev_flags;
	u8 rdev_class;
	u8 rdev_type;
	u8 rdev_model;
	u8 rdev_features;
} __attribute__ ((packed, aligned(4)));

/* One block I/O descriptor in the list passed to diag 250. */
struct dasd_diag_bio {
	u8 type;		/* MDSK_READ_REQ or MDSK_WRITE_REQ */
	u8 status;
	u16 spare1;
	u32 block_number;
	u32 alet;
	u32 buffer;		/* real address of the data buffer */
} __attribute__ ((packed, aligned(8)));

/* Parameter block for the INIT_BIO and TERM_BIO diag 250 calls. */
struct dasd_diag_init_io {
	u16 dev_nr;
	u16 spare1[11];
	u32 block_size;
	u32 offset;
	u32 start_block;
	u32 end_block;
	u32 spare2[6];
} __attribute__ ((packed, aligned(8)));

/* Parameter block for the RW_BIO diag 250 call. */
struct dasd_diag_rw_io {
	u16 dev_nr;
	u16 spare1[11];
	u8 key;
	u8 flags;		/* 2 = asynchronous, 0 = synchronous io */
	u16 spare2;
	u32 block_count;
	u32 alet;
	u32 bio_list;		/* real address of the dasd_diag_bio array */
	u32 interrupt_params;	/* returned in ext_params on completion */
	u32 spare3[5];
} __attribute__ ((packed, aligned(8)));
66
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 000000000000..838aedf78a56
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,1722 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_eckd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 *
10 * $Revision: 1.69 $
11 */
12
13#include <linux/config.h>
14#include <linux/stddef.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/hdreg.h> /* HDIO_GETGEO */
18#include <linux/bio.h>
19#include <linux/module.h>
20#include <linux/init.h>
21
22#include <asm/debug.h>
23#include <asm/idals.h>
24#include <asm/ebcdic.h>
25#include <asm/io.h>
26#include <asm/todclk.h>
27#include <asm/uaccess.h>
28#include <asm/ccwdev.h>
29
30#include "dasd_int.h"
31#include "dasd_eckd.h"
32
/* All messages of this driver are prefixed with "dasd(eckd):". */
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * Accessors for the track capacity computation fields of the read
 * device characteristics data. Where the factors f1-f3 live depends
 * on which capacity formula (0x01 or 0x02) the device reports; f4
 * and f5 exist only for formula 0x02. Used by bytes_per_record().
 */
#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)
51
52MODULE_LICENSE("GPL");
53
54static struct dasd_discipline dasd_eckd_discipline;
55
/* Per-device private data of the ECKD discipline. */
struct dasd_eckd_private {
	struct dasd_eckd_characteristics rdc_data; /* read device characteristics */
	struct dasd_eckd_confdata conf_data;	/* read configuration data */
	struct dasd_eckd_path path_data;	/* operational/path-group masks */
	struct eckd_count count_area[5];	/* count fields read by the
						 * initial volume analysis */
	int init_cqr_status;	/* status of the analysis cqr, -1 = not run */
	int uses_cdl;		/* 1 = compatible disk layout (OS/390 style) */
	struct attrib_data_t attrib;	/* e.g. cache operations */
};
65
66/* The ccw bus type uses this table to find devices that it sends to
67 * dasd_eckd_probe */
68static struct ccw_device_id dasd_eckd_ids[] = {
69 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), driver_info: 0x1},
70 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), driver_info: 0x2},
71 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), driver_info: 0x3},
72 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), driver_info: 0x4},
73 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), driver_info: 0x5},
74 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), driver_info: 0x6},
75 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), driver_info: 0x7},
76 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), driver_info: 0x8},
77 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), driver_info: 0x9},
78 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), driver_info: 0xa},
79 { /* end of list */ },
80};
81
82MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
83
84static struct ccw_driver dasd_eckd_driver; /* see below */
85
86/* initial attempt at a probe function. this can be simplified once
87 * the other detection code is gone */
88static int
89dasd_eckd_probe (struct ccw_device *cdev)
90{
91 int ret;
92
93 ret = dasd_generic_probe (cdev, &dasd_eckd_discipline);
94 if (ret)
95 return ret;
96 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_ALLOW_FORCE);
97 return 0;
98}
99
/* set_online callback: bring the device online with the ECKD discipline. */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online (cdev, &dasd_eckd_discipline);
}
105
/*
 * ccw driver structure binding the device types in dasd_eckd_ids to
 * the generic dasd callbacks (with ECKD-specific probe/set_online).
 */
static struct ccw_driver dasd_eckd_driver = {
	.name        = "dasd-eckd",
	.owner       = THIS_MODULE,
	.ids         = dasd_eckd_ids,
	.probe       = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
};
116
/* Data lengths of records 1-3 on track 0 of a cdl formatted volume;
 * LABEL_SIZE is the data length of the label records on track 1
 * (see dasd_eckd_cdl_reclen and dasd_eckd_format_device). */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
119
/*
 * Round no up to the next multiple of mult. Returns no unchanged if
 * it already is a multiple of mult.
 * Fix: the remainder is now kept in an unsigned variable; the old
 * 'int rem' mixed signed and unsigned arithmetic for no reason.
 */
static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	unsigned int rem = no % mult;

	return rem ? no - rem + mult : no;
}
126
/*
 * Integer division of d1 by d2, rounding the result up.
 * Fix: written as d1/d2 plus a remainder correction instead of
 * (d1 + d2 - 1) / d2, which silently wraps around for values of d1
 * close to UINT_MAX.
 */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return d1 / d2 + (d1 % d2 != 0);
}
132
/*
 * Number of bytes a record with key length kl and data length dl
 * occupies on a track, computed with the capacity formula (0x01 or
 * 0x02) that the device reports in its characteristics data. The
 * factor fields (ECKD_F1..F6) come from the same characteristics
 * block. Returns 0 for an unknown formula.
 */
static inline int
bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
{
	unsigned int fl1, fl2, int1, int2;
	int bpr;

	switch (rdc->formula) {
	case 0x01:
		/* formula 1: pad data and (nonzero) key field to f1 units */
		fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
		fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
					ECKD_F1(rdc));
		bpr = fl1 + fl2;
		break;
	case 0x02:
		/* formula 2: adds a per-segment overhead (f4 per int1/int2
		 * segments of 2*f5 bytes) before rounding to f1 units */
		int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
		int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
		fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
					ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
					ECKD_F1(rdc));
		fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
					ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
					ECKD_F1(rdc));
		bpr = fl1 + fl2;
		break;
	default:
		bpr = 0;
		break;
	}
	return bpr;
}
163
/*
 * Usable bytes per track from the device characteristics.
 * byte_per_track is presumably a 3-byte big-endian field: reading
 * four bytes and shifting right by 8 extracts it on big-endian s390.
 * NOTE(review): this type-puns through an incompatible pointer and
 * is endian-dependent; fine for s390-only code, but would be a
 * strict-aliasing/portability problem elsewhere — confirm against
 * the struct dasd_eckd_characteristics layout.
 */
static inline unsigned int
bytes_per_track(struct dasd_eckd_characteristics *rdc)
{
	return *(unsigned int *) (rdc->byte_per_track) >> 8;
}
169
/*
 * Number of records with key length kl and data length dl that fit
 * on one track of the given device type. The constants are the
 * fixed track-capacity formulas of the 3380/3390/9345 device
 * families. Returns 0 for an unknown device type.
 */
static inline unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;		/* number of data/key segments (3390/9345) */

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
202
/*
 * Enable XRC (Extended Remote Copy) time stamping in a Define Extent
 * ccw if the control unit supports it: switch on the 'Time Stamp
 * Valid' and 'Extended Parameter' bits, store the current TOD clock
 * as the time stamp and widen the ccw data count to cover the
 * extended part of the DE data.
 */
static inline void
check_XRC (struct ccw1 *de_ccw,
           struct DE_eckd_data *data,
           struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;

	/* switch on System Time Stamp - needed for XRC Support */
	if (private->rdc_data.facilities.XRC_supported) {

		data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
		data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

		data->ep_sys_time = get_clock ();

		de_ccw->count = sizeof (struct DE_eckd_data);
		/* suppress incorrect-length indication for the longer count */
		de_ccw->flags |= CCW_FLAG_SLI;
	}

	return;

} /* end check_XRC */
227
/*
 * Build a Define Extent ccw for the track range [trk, totrk]: sets
 * the permission mask and cache attributes matching cmd, converts
 * the linear track numbers to cylinder/head addresses and, for write
 * commands, adds the XRC time stamp when supported. Unknown opcodes
 * are logged but still get a (mostly zeroed) DE data block.
 */
static inline void
define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
	      int totrk, int cmd, struct dasd_device * device)
{
	struct dasd_eckd_private *private;
	struct ch_t geo, beg, end;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof (struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		/* read-only permission, caller-selected cache attributes */
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		/* destructive commands additionally need the authorize bit */
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		check_XRC (ccw, data, device);
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* newer control units accept Regular Data Format Mode, except
	 * on the cdl special tracks 0 and 1 */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	geo.cyl = private->rdc_data.no_cyl;
	geo.head = private->rdc_data.trk_per_cyl;
	beg.cyl = trk / geo.head;
	beg.head = trk % geo.head;
	end.cyl = totrk / geo.head;
	end.head = totrk % geo.head;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (end.cyl + private->attrib.nr_cyl < geo.cyl)
			end.cyl += private->attrib.nr_cyl;
		else
			end.cyl = (geo.cyl - 1);
	}

	data->beg_ext.cyl = beg.cyl;
	data->beg_ext.head = beg.head;
	data->end_ext.cyl = end.cyl;
	data->end_ext.head = end.head;
}
312
/*
 * Build a Locate Record ccw: computes the approximate angular sector
 * of the target record for rotational position sensing (3380/3390
 * only), sets the LO operation/orientation codes matching cmd and
 * fills in seek and search arguments for track trk, record
 * rec_on_trk. no_rec is the number of records the following data
 * transfer ccws will access; for the record-zero commands it is
 * incremented by one to include R0 itself.
 */
static inline void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
	      int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof (struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/* device-type specific sector estimate for the record;
		 * constants are part of the 3390/3380 capacity formulas */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;		/* include record zero */
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;		/* include record zero */
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
	}
	data->seek_addr.cyl = data->search_arg.cyl =
		trk / private->rdc_data.trk_per_cyl;
	data->seek_addr.head = data->search_arg.head =
		trk % private->rdc_data.trk_per_cyl;
	data->search_arg.record = rec_on_trk;
}
414
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command, i.e.
 * DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant, so callers simply do:
 *	if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *		ccw->cmd_code |= 0x8;
 * The special blocks are records 0-2 of track 0 and every record of
 * track 1.
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	return (recid < 3) ||
		(recid >= blk_per_trk && recid < 2 * blk_per_trk);
}
436
437/*
438 * Returns the record size for the special blocks of the cdl format.
439 * Only returns something useful if dasd_eckd_cdl_special is true
440 * for the recid.
441 */
442static inline int
443dasd_eckd_cdl_reclen(int recid)
444{
445 if (recid < 3)
446 return sizes_trk0[recid];
447 return LABEL_SIZE;
448}
449
/*
 * Read the configuration data on every operational channel path and
 * classify the paths into the npm and ppm masks (presumably
 * non-preferred / preferred path groups — confirm against the ECKD
 * configuration data layout). The first successfully read record is
 * saved in the discipline private data. Paths that return no or
 * short data are skipped without error; a real read error aborts
 * with its return code.
 */
static int
dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	lpm = 0x80;
	conf_data_saved = 0;

	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (lpm & path_data->opm){
			rc = read_conf_data_lpm(device->cdev, &conf_data,
						&conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				MESSAGE(KERN_WARNING,
					"Read configuration data returned "
					"error %d", rc);
				return rc;
			}
			if (conf_data == NULL) {
				MESSAGE(KERN_WARNING, "%s", "No configuration "
					"data retrieved");
				continue;	/* no error */
			}
			if (conf_len != sizeof (struct dasd_eckd_confdata)) {
				MESSAGE(KERN_WARNING,
					"sizes of configuration data mismatch"
					"%d (read) vs %ld (expected)",
					conf_len,
					sizeof (struct dasd_eckd_confdata));
				kfree(conf_data);
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved){
				memcpy(&private->conf_data, conf_data,
				       sizeof (struct dasd_eckd_confdata));
				conf_data_saved++;
			}
			/* low three bits of byte 242 classify this path —
			 * NOTE(review): semantics of 0x02/0x03 assumed from
			 * the npm/ppm field names, confirm with the spec */
			switch (((char *)conf_data)[242] & 0x07){
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			kfree(conf_data);
		}
	}
	return 0;
}
510
511
/*
 * Discipline setup for a device: allocate the ECKD private data on
 * first use (GFP_DMA — the characteristics buffer is a channel I/O
 * target), set the cache operation defaults, read the device
 * characteristics and the configuration data. Returns 0 or a
 * negative error code.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	void *rdc_data;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (private == NULL) {
		private = kmalloc(sizeof(struct dasd_eckd_private),
				  GFP_KERNEL | GFP_DMA);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private "
				    "data");
			return -ENOMEM;
		}
		memset(private, 0, sizeof(struct dasd_eckd_private));
		device->private = (void *) private;
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rc = read_dev_chars(device->cdev, &rdc_data, 64);
	if (rc) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Read device characteristics returned error %d",
			    rc);
		return rc;
	}

	DEV_MESSAGE(KERN_INFO, device,
		    "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
		    private->rdc_data.dev_type,
		    private->rdc_data.dev_model,
		    private->rdc_data.cu_type,
		    private->rdc_data.cu_model.model,
		    private->rdc_data.no_cyl,
		    private->rdc_data.trk_per_cyl,
		    private->rdc_data.sec_per_trk);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf (device);
	return rc;

}
563
/*
 * Build the channel program for the initial volume analysis: one
 * Define Extent over tracks 0-2, then read the count fields of the
 * first 4 records on track 0 and of the first record on track 2.
 * The five count areas land in private->count_area and are evaluated
 * by dasd_eckd_end_analysis. The ccws are chained with CCW_FLAG_CC.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	/* 1x DE + 2x LO + 5x read count */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof (struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->device = device;
	cqr->retries = 0;	/* no retries for the analysis request */
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
621
622/*
623 * This is the callback function for the init_analysis cqr. It saves
624 * the status of the initial analysis ccw before it frees it and kicks
625 * the device to continue the startup sequence. This will call
626 * dasd_eckd_do_analysis again (if the devices has not been marked
627 * for deletion in the meantime).
628 */
629static void
630dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
631{
632 struct dasd_eckd_private *private;
633 struct dasd_device *device;
634
635 device = init_cqr->device;
636 private = (struct dasd_eckd_private *) device->private;
637 private->init_cqr_status = init_cqr->status;
638 dasd_sfree_request(init_cqr, device);
639 dasd_kick_device(device);
640}
641
642static int
643dasd_eckd_start_analysis(struct dasd_device *device)
644{
645 struct dasd_eckd_private *private;
646 struct dasd_ccw_req *init_cqr;
647
648 private = (struct dasd_eckd_private *) device->private;
649 init_cqr = dasd_eckd_analysis_ccw(device);
650 if (IS_ERR(init_cqr))
651 return PTR_ERR(init_cqr);
652 init_cqr->callback = dasd_eckd_analysis_callback;
653 init_cqr->callback_data = NULL;
654 init_cqr->expires = 5*HZ;
655 dasd_add_request_head(init_cqr);
656 return -EAGAIN;
657}
658
/*
 * Evaluate the five count areas read by the analysis channel
 * program: decide whether the volume uses the compatible disk layout
 * (cdl), derive the blocksize from the count fields and compute the
 * total number of blocks. Returns -EMEDIUMTYPE if the analysis
 * request failed (unformatted volume) or no usable layout was found.
 */
static int
dasd_eckd_end_analysis(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;

	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status != DASD_CQR_DONE) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "volume analysis returned unformatted disk");
		return -EMEDIUMTYPE;
	}

	private->uses_cdl = 1;
	/* Calculate number of blocks/records per track. */
	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
	/* Check Track 0 for Compatible Disk Layout: records 1-3 must have
	 * the cdl key/data lengths. */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		/* cdl confirmed: take the blocksize from track 2's record */
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		/* linux disk layout: all sampled records must be keyless
		 * and of equal data length */
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "Trk 0: no records after VTOC!");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			device->bp_block = count_area->dl;
	}
	if (device->bp_block == 0) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Volume has incompatible disk layout");
		return -EMEDIUMTYPE;
	}
	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < device->bp_block; sb = sb << 1)
		device->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
	device->blocks = (private->rdc_data.no_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	DEV_MESSAGE(KERN_INFO, device,
		    "(%dkB blks): %dkB at %dkB/trk %s",
		    (device->bp_block >> 10),
		    ((private->rdc_data.no_cyl *
		      private->rdc_data.trk_per_cyl *
		      blk_per_trk * (device->bp_block >> 9)) >> 1),
		    ((blk_per_trk * device->bp_block) >> 10),
		    private->uses_cdl ?
		    "compatible disk layout" : "linux disk layout");

	return 0;
}
736
737static int
738dasd_eckd_do_analysis(struct dasd_device *device)
739{
740 struct dasd_eckd_private *private;
741
742 private = (struct dasd_eckd_private *) device->private;
743 if (private->init_cqr_status < 0)
744 return dasd_eckd_start_analysis(device);
745 else
746 return dasd_eckd_end_analysis(device);
747}
748
/*
 * Fill in the disk geometry (cylinders, heads, sectors per track)
 * from the device characteristics for HDIO_GETGEO. Sectors are only
 * filled in once a valid blocksize is known from the analysis.
 * Always returns 0.
 */
static int
dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	if (dasd_check_blocksize(device->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, device->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}
763
/*
 * Build a channel program that formats the single track
 * fdata->start_unit with blocksize fdata->blksize. What exactly is
 * written is selected by the fdata->intensity bits (described
 * below). Returns the filled cqr or ERR_PTR(-EINVAL) for invalid
 * parameters.
 */
static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt, cyl, head;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
	head = fdata->start_unit % private->rdc_data.trk_per_cyl;

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
		DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
			    fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
			    fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Invalid blocksize %d...terminating!",
			    fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 * Only some bit combinations do make sense.
	 */
	switch (fdata->intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
			    fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;

	/* First the define extent / locate record pair for the track. */
	switch (fdata->intensity & ~0x08) {
	case 0x00:	/* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01:	/* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04:	/* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (fdata->intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = cyl;
		ect->head = head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((fdata->intensity & ~0x08) & 0x04) {	/* erase track */
		/* a single record of data length 0 invalidates the track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = cyl;
		ect->head = head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {	/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = cyl;
			ect->head = head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((fdata->intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((fdata->intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->device = device;
	fcp->retries = 2;	/* set retry counter to enable ERP */
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}
948
/*
 * First-level error examination: classify the interrupt status of a
 * finished request. A clean channel/device-end status is no error;
 * anything else is delegated to the control-unit specific examine
 * routine (3990-style or 9343). Unknown control units default to
 * "recoverable".
 */
static dasd_era_t
dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
{
	struct dasd_device *device = (struct dasd_device *) cqr->device;
	struct ccw_device *cdev = device->cdev;

	if (irb->scsw.cstat == 0x00 &&
	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return dasd_era_none;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_examine(cqr, irb);
	case 0x9343:
		return dasd_9343_erp_examine(cqr, irb);
	case 0x3880:
	default:
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "default (unknown CU type) - RECOVERABLE return");
		return dasd_era_recover;
	}
}
974
975static dasd_erp_fn_t
976dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
977{
978 struct dasd_device *device = (struct dasd_device *) cqr->device;
979 struct ccw_device *cdev = device->cdev;
980
981 switch (cdev->id.cu_type) {
982 case 0x3990:
983 case 0x2105:
984 case 0x2107:
985 case 0x1750:
986 return dasd_3990_erp_action;
987 case 0x9343:
988 case 0x3880:
989 default:
990 return dasd_default_erp_action;
991 }
992}
993
/*
 * Select the post-ERP handler; the ECKD discipline uses the default
 * implementation for all control unit types.
 */
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
999
1000static struct dasd_ccw_req *
1001dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1002{
1003 struct dasd_eckd_private *private;
1004 unsigned long *idaws;
1005 struct LO_eckd_data *LO_data;
1006 struct dasd_ccw_req *cqr;
1007 struct ccw1 *ccw;
1008 struct bio *bio;
1009 struct bio_vec *bv;
1010 char *dst;
1011 unsigned int blksize, blk_per_trk, off;
1012 int count, cidaw, cplength, datasize;
1013 sector_t recid, first_rec, last_rec;
1014 sector_t first_trk, last_trk;
1015 unsigned int first_offs, last_offs;
1016 unsigned char cmd, rcmd;
1017 int i;
1018
1019 private = (struct dasd_eckd_private *) device->private;
1020 if (rq_data_dir(req) == READ)
1021 cmd = DASD_ECKD_CCW_READ_MT;
1022 else if (rq_data_dir(req) == WRITE)
1023 cmd = DASD_ECKD_CCW_WRITE_MT;
1024 else
1025 return ERR_PTR(-EINVAL);
1026 /* Calculate number of blocks/records per track. */
1027 blksize = device->bp_block;
1028 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1029 /* Calculate record id of first and last block. */
1030 first_rec = first_trk = req->sector >> device->s2b_shift;
1031 first_offs = sector_div(first_trk, blk_per_trk);
1032 last_rec = last_trk =
1033 (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
1034 last_offs = sector_div(last_trk, blk_per_trk);
1035 /* Check struct bio and count the number of blocks for the request. */
1036 count = 0;
1037 cidaw = 0;
1038 rq_for_each_bio(bio, req) {
1039 bio_for_each_segment(bv, bio, i) {
1040 if (bv->bv_len & (blksize - 1))
1041 /* Eckd can only do full blocks. */
1042 return ERR_PTR(-EINVAL);
1043 count += bv->bv_len >> (device->s2b_shift + 9);
1044#if defined(CONFIG_ARCH_S390X)
1045 if (idal_is_needed (page_address(bv->bv_page),
1046 bv->bv_len))
1047 cidaw += bv->bv_len >> (device->s2b_shift + 9);
1048#endif
1049 }
1050 }
1051 /* Paranoia. */
1052 if (count != last_rec - first_rec + 1)
1053 return ERR_PTR(-EINVAL);
1054 /* 1x define extent + 1x locate record + number of blocks */
1055 cplength = 2 + count;
1056 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1057 datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
1058 cidaw * sizeof(unsigned long);
1059 /* Find out the number of additional locate record ccws for cdl. */
1060 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1061 if (last_rec >= 2*blk_per_trk)
1062 count = 2*blk_per_trk - first_rec;
1063 cplength += count;
1064 datasize += count*sizeof(struct LO_eckd_data);
1065 }
1066 /* Allocate the ccw request. */
1067 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1068 cplength, datasize, device);
1069 if (IS_ERR(cqr))
1070 return cqr;
1071 ccw = cqr->cpaddr;
1072 /* First ccw is define extent. */
1073 define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device);
1074 /* Build locate_record+read/write/ccws. */
1075 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1076 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1077 recid = first_rec;
1078 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1079 /* Only standard blocks so there is just one locate record. */
1080 ccw[-1].flags |= CCW_FLAG_CC;
1081 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1082 last_rec - recid + 1, cmd, device, blksize);
1083 }
1084 rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1085 dst = page_address(bv->bv_page) + bv->bv_offset;
1086 if (dasd_page_cache) {
1087 char *copy = kmem_cache_alloc(dasd_page_cache,
1088 SLAB_DMA | __GFP_NOWARN);
1089 if (copy && rq_data_dir(req) == WRITE)
1090 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1091 if (copy)
1092 dst = copy + bv->bv_offset;
1093 }
1094 for (off = 0; off < bv->bv_len; off += blksize) {
1095 sector_t trkid = recid;
1096 unsigned int recoffs = sector_div(trkid, blk_per_trk);
1097 rcmd = cmd;
1098 count = blksize;
1099 /* Locate record for cdl special block ? */
1100 if (private->uses_cdl && recid < 2*blk_per_trk) {
1101 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1102 rcmd |= 0x8;
1103 count = dasd_eckd_cdl_reclen(recid);
1104 if (count < blksize)
1105 memset(dst + count, 0xe5,
1106 blksize - count);
1107 }
1108 ccw[-1].flags |= CCW_FLAG_CC;
1109 locate_record(ccw++, LO_data++,
1110 trkid, recoffs + 1,
1111 1, rcmd, device, count);
1112 }
1113 /* Locate record for standard blocks ? */
1114 if (private->uses_cdl && recid == 2*blk_per_trk) {
1115 ccw[-1].flags |= CCW_FLAG_CC;
1116 locate_record(ccw++, LO_data++,
1117 trkid, recoffs + 1,
1118 last_rec - recid + 1,
1119 cmd, device, count);
1120 }
1121 /* Read/write ccw. */
1122 ccw[-1].flags |= CCW_FLAG_CC;
1123 ccw->cmd_code = rcmd;
1124 ccw->count = count;
1125 if (idal_is_needed(dst, blksize)) {
1126 ccw->cda = (__u32)(addr_t) idaws;
1127 ccw->flags = CCW_FLAG_IDA;
1128 idaws = idal_create_words(idaws, dst, blksize);
1129 } else {
1130 ccw->cda = (__u32)(addr_t) dst;
1131 ccw->flags = 0;
1132 }
1133 ccw++;
1134 dst += blksize;
1135 recid++;
1136 }
1137 }
1138 cqr->device = device;
1139 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1140 cqr->lpm = private->path_data.ppm;
1141 cqr->retries = 256;
1142 cqr->buildclk = get_clock();
1143 cqr->status = DASD_CQR_FILLED;
1144 return cqr;
1145}
1146
/*
 * Free a channel program built by dasd_eckd_build_cp.  If bounce buffers
 * from dasd_page_cache were used, copy read data back into the request's
 * bio segments and release the buffers before freeing the cqr.
 * Returns 1 when the request finished successfully (DASD_CQR_DONE),
 * 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct bio *bio;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int i, status;

	/* Without the page cache, build_cp never substituted buffers. */
	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->device->private;
	blksize = cqr->device->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = req->sector >> cqr->device->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	/* build_cp emits one up-front locate record under this condition. */
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	/* Walk the CCWs in lockstep with the bio segments, as in build_cp. */
	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					/* cda points at an IDAW list; the
					 * first IDAW holds the data address */
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* A bounce buffer was used: copy data
					 * back on READ and free the page. */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				/* Segment handled; one copy/free per bvec. */
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->device);
	return status;
}
1198
1199static int
1200dasd_eckd_fill_info(struct dasd_device * device,
1201 struct dasd_information2_t * info)
1202{
1203 struct dasd_eckd_private *private;
1204
1205 private = (struct dasd_eckd_private *) device->private;
1206 info->label_block = 2;
1207 info->FBA_layout = private->uses_cdl ? 0 : 1;
1208 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
1209 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1210 memcpy(info->characteristics, &private->rdc_data,
1211 sizeof(struct dasd_eckd_characteristics));
1212 info->confdata_size = sizeof (struct dasd_eckd_confdata);
1213 memcpy(info->configuration_data, &private->conf_data,
1214 sizeof (struct dasd_eckd_confdata));
1215 return 0;
1216}
1217
1218/*
1219 * SECTION: ioctl functions for eckd devices.
1220 */
1221
/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
1227static int
1228dasd_eckd_release(struct block_device *bdev, int no, long args)
1229{
1230 struct dasd_device *device;
1231 struct dasd_ccw_req *cqr;
1232 int rc;
1233
1234 if (!capable(CAP_SYS_ADMIN))
1235 return -EACCES;
1236
1237 device = bdev->bd_disk->private_data;
1238 if (device == NULL)
1239 return -ENODEV;
1240
1241 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1242 1, 32, device);
1243 if (IS_ERR(cqr)) {
1244 DEV_MESSAGE(KERN_WARNING, device, "%s",
1245 "Could not allocate initialization request");
1246 return PTR_ERR(cqr);
1247 }
1248 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
1249 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1250 cqr->cpaddr->count = 32;
1251 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1252 cqr->device = device;
1253 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1254 cqr->retries = 0;
1255 cqr->expires = 2 * HZ;
1256 cqr->buildclk = get_clock();
1257 cqr->status = DASD_CQR_FILLED;
1258
1259 rc = dasd_sleep_on_immediatly(cqr);
1260
1261 dasd_sfree_request(cqr, cqr->device);
1262 return rc;
1263}
1264
1265/*
1266 * Reserve device ioctl.
1267 * Options are set to 'synchronous wait for interrupt' and
1268 * 'timeout the request'. This leads to a terminate IO if
1269 * the interrupt is outstanding for a certain time.
1270 */
1271static int
1272dasd_eckd_reserve(struct block_device *bdev, int no, long args)
1273{
1274 struct dasd_device *device;
1275 struct dasd_ccw_req *cqr;
1276 int rc;
1277
1278 if (!capable(CAP_SYS_ADMIN))
1279 return -EACCES;
1280
1281 device = bdev->bd_disk->private_data;
1282 if (device == NULL)
1283 return -ENODEV;
1284
1285 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1286 1, 32, device);
1287 if (IS_ERR(cqr)) {
1288 DEV_MESSAGE(KERN_WARNING, device, "%s",
1289 "Could not allocate initialization request");
1290 return PTR_ERR(cqr);
1291 }
1292 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
1293 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1294 cqr->cpaddr->count = 32;
1295 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1296 cqr->device = device;
1297 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1298 cqr->retries = 0;
1299 cqr->expires = 2 * HZ;
1300 cqr->buildclk = get_clock();
1301 cqr->status = DASD_CQR_FILLED;
1302
1303 rc = dasd_sleep_on_immediatly(cqr);
1304
1305 dasd_sfree_request(cqr, cqr->device);
1306 return rc;
1307}
1308
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
1314static int
1315dasd_eckd_steal_lock(struct block_device *bdev, int no, long args)
1316{
1317 struct dasd_device *device;
1318 struct dasd_ccw_req *cqr;
1319 int rc;
1320
1321 if (!capable(CAP_SYS_ADMIN))
1322 return -EACCES;
1323
1324 device = bdev->bd_disk->private_data;
1325 if (device == NULL)
1326 return -ENODEV;
1327
1328 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1329 1, 32, device);
1330 if (IS_ERR(cqr)) {
1331 DEV_MESSAGE(KERN_WARNING, device, "%s",
1332 "Could not allocate initialization request");
1333 return PTR_ERR(cqr);
1334 }
1335 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
1336 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1337 cqr->cpaddr->count = 32;
1338 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1339 cqr->device = device;
1340 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1341 cqr->retries = 0;
1342 cqr->expires = 2 * HZ;
1343 cqr->buildclk = get_clock();
1344 cqr->status = DASD_CQR_FILLED;
1345
1346 rc = dasd_sleep_on_immediatly(cqr);
1347
1348 dasd_sfree_request(cqr, cqr->device);
1349 return rc;
1350}
1351
1352/*
1353 * Read performance statistics
1354 */
1355static int
1356dasd_eckd_performance(struct block_device *bdev, int no, long args)
1357{
1358 struct dasd_device *device;
1359 struct dasd_psf_prssd_data *prssdp;
1360 struct dasd_rssd_perf_stats_t *stats;
1361 struct dasd_ccw_req *cqr;
1362 struct ccw1 *ccw;
1363 int rc;
1364
1365 device = bdev->bd_disk->private_data;
1366 if (device == NULL)
1367 return -ENODEV;
1368
1369 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1370 1 /* PSF */ + 1 /* RSSD */ ,
1371 (sizeof (struct dasd_psf_prssd_data) +
1372 sizeof (struct dasd_rssd_perf_stats_t)),
1373 device);
1374 if (IS_ERR(cqr)) {
1375 DEV_MESSAGE(KERN_WARNING, device, "%s",
1376 "Could not allocate initialization request");
1377 return PTR_ERR(cqr);
1378 }
1379 cqr->device = device;
1380 cqr->retries = 0;
1381 cqr->expires = 10 * HZ;
1382
1383 /* Prepare for Read Subsystem Data */
1384 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1385 memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
1386 prssdp->order = PSF_ORDER_PRSSD;
1387 prssdp->suborder = 0x01; /* Perfomance Statistics */
1388 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
1389
1390 ccw = cqr->cpaddr;
1391 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1392 ccw->count = sizeof (struct dasd_psf_prssd_data);
1393 ccw->flags |= CCW_FLAG_CC;
1394 ccw->cda = (__u32)(addr_t) prssdp;
1395
1396 /* Read Subsystem Data - Performance Statistics */
1397 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1398 memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
1399
1400 ccw++;
1401 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1402 ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
1403 ccw->cda = (__u32)(addr_t) stats;
1404
1405 cqr->buildclk = get_clock();
1406 cqr->status = DASD_CQR_FILLED;
1407 rc = dasd_sleep_on(cqr);
1408 if (rc == 0) {
1409 /* Prepare for Read Subsystem Data */
1410 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1411 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1412 rc = copy_to_user((long __user *) args, (long *) stats,
1413 sizeof(struct dasd_rssd_perf_stats_t));
1414 }
1415 dasd_sfree_request(cqr, cqr->device);
1416 return rc;
1417}
1418
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
1423static int
1424dasd_eckd_get_attrib (struct block_device *bdev, int no, long args)
1425{
1426 struct dasd_device *device;
1427 struct dasd_eckd_private *private;
1428 struct attrib_data_t attrib;
1429 int rc;
1430
1431 if (!capable(CAP_SYS_ADMIN))
1432 return -EACCES;
1433 if (!args)
1434 return -EINVAL;
1435
1436 device = bdev->bd_disk->private_data;
1437 if (device == NULL)
1438 return -ENODEV;
1439
1440 private = (struct dasd_eckd_private *) device->private;
1441 attrib = private->attrib;
1442
1443 rc = copy_to_user((long __user *) args, (long *) &attrib,
1444 sizeof (struct attrib_data_t));
1445
1446 return rc;
1447}
1448
1449/*
1450 * Set attributes (cache operations)
1451 * Stores the attributes for cache operation to be used in Define Extend (DE).
1452 */
1453static int
1454dasd_eckd_set_attrib(struct block_device *bdev, int no, long args)
1455{
1456 struct dasd_device *device;
1457 struct dasd_eckd_private *private;
1458 struct attrib_data_t attrib;
1459
1460 if (!capable(CAP_SYS_ADMIN))
1461 return -EACCES;
1462 if (!args)
1463 return -EINVAL;
1464
1465 device = bdev->bd_disk->private_data;
1466 if (device == NULL)
1467 return -ENODEV;
1468
1469 if (copy_from_user(&attrib, (void __user *) args,
1470 sizeof (struct attrib_data_t))) {
1471 return -EFAULT;
1472 }
1473 private = (struct dasd_eckd_private *) device->private;
1474 private->attrib = attrib;
1475
1476 DEV_MESSAGE(KERN_INFO, device,
1477 "cache operation mode set to %x (%i cylinder prestage)",
1478 private->attrib.operation, private->attrib.nr_cyl);
1479 return 0;
1480}
1481
1482/*
1483 * Print sense data and related channel program.
1484 * Parts are printed because printk buffer is only 1024 bytes.
1485 */
1486static void
1487dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1488 struct irb *irb)
1489{
1490 char *page;
1491 struct ccw1 *act, *end, *last;
1492 int len, sl, sct, count;
1493
1494 page = (char *) get_zeroed_page(GFP_ATOMIC);
1495 if (page == NULL) {
1496 DEV_MESSAGE(KERN_ERR, device, " %s",
1497 "No memory to dump sense data");
1498 return;
1499 }
1500 len = sprintf(page, KERN_ERR PRINTK_HEADER
1501 " I/O status report for device %s:\n",
1502 device->cdev->dev.bus_id);
1503 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1504 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
1505 irb->scsw.cstat, irb->scsw.dstat);
1506 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1507 " device %s: Failing CCW: %p\n",
1508 device->cdev->dev.bus_id,
1509 (void *) (addr_t) irb->scsw.cpa);
1510 if (irb->esw.esw0.erw.cons) {
1511 for (sl = 0; sl < 4; sl++) {
1512 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1513 " Sense(hex) %2d-%2d:",
1514 (8 * sl), ((8 * sl) + 7));
1515
1516 for (sct = 0; sct < 8; sct++) {
1517 len += sprintf(page + len, " %02x",
1518 irb->ecw[8 * sl + sct]);
1519 }
1520 len += sprintf(page + len, "\n");
1521 }
1522
1523 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
1524 /* 24 Byte Sense Data */
1525 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1526 " 24 Byte: %x MSG %x, "
1527 "%s MSGb to SYSOP\n",
1528 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
1529 irb->ecw[1] & 0x10 ? "" : "no");
1530 } else {
1531 /* 32 Byte Sense Data */
1532 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1533 " 32 Byte: Format: %x "
1534 "Exception class %x\n",
1535 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
1536 }
1537 } else {
1538 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1539 " SORRY - NO VALID SENSE AVAILABLE\n");
1540 }
1541 MESSAGE_LOG(KERN_ERR, "%s",
1542 page + sizeof(KERN_ERR PRINTK_HEADER));
1543
1544 /* dump the Channel Program */
1545 /* print first CCWs (maximum 8) */
1546 act = req->cpaddr;
1547 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1548 end = min(act + 8, last);
1549 len = sprintf(page, KERN_ERR PRINTK_HEADER
1550 " Related CP in req: %p\n", req);
1551 while (act <= end) {
1552 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1553 " CCW %p: %08X %08X DAT:",
1554 act, ((int *) act)[0], ((int *) act)[1]);
1555 for (count = 0; count < 32 && count < act->count;
1556 count += sizeof(int))
1557 len += sprintf(page + len, " %08X",
1558 ((int *) (addr_t) act->cda)
1559 [(count>>2)]);
1560 len += sprintf(page + len, "\n");
1561 act++;
1562 }
1563 MESSAGE_LOG(KERN_ERR, "%s",
1564 page + sizeof(KERN_ERR PRINTK_HEADER));
1565
1566 /* print failing CCW area */
1567 len = 0;
1568 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
1569 act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
1570 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1571 }
1572 end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
1573 while (act <= end) {
1574 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1575 " CCW %p: %08X %08X DAT:",
1576 act, ((int *) act)[0], ((int *) act)[1]);
1577 for (count = 0; count < 32 && count < act->count;
1578 count += sizeof(int))
1579 len += sprintf(page + len, " %08X",
1580 ((int *) (addr_t) act->cda)
1581 [(count>>2)]);
1582 len += sprintf(page + len, "\n");
1583 act++;
1584 }
1585
1586 /* print last CCWs */
1587 if (act < last - 2) {
1588 act = last - 2;
1589 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1590 }
1591 while (act <= last) {
1592 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1593 " CCW %p: %08X %08X DAT:",
1594 act, ((int *) act)[0], ((int *) act)[1]);
1595 for (count = 0; count < 32 && count < act->count;
1596 count += sizeof(int))
1597 len += sprintf(page + len, " %08X",
1598 ((int *) (addr_t) act->cda)
1599 [(count>>2)]);
1600 len += sprintf(page + len, "\n");
1601 act++;
1602 }
1603 if (len > 0)
1604 MESSAGE_LOG(KERN_ERR, "%s",
1605 page + sizeof(KERN_ERR PRINTK_HEADER));
1606 free_page((unsigned long) page);
1607}
1608
1609/*
1610 * max_blocks is dependent on the amount of storage that is available
1611 * in the static io buffer for each device. Currently each device has
1612 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
1613 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
1614 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
1615 * addition we have one define extent ccw + 16 bytes of data and one
1616 * locate record ccw + 16 bytes of data. That makes:
1617 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
1618 * We want to fit two into the available memory so that we can immediately
1619 * start the next request if one finishes off. That makes 249.5 blocks
1620 * for one request. Give a little safety and the result is 240.
1621 */
/* Operations the dasd core uses to drive ECKD-type devices. */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",	/* converted in place by dasd_eckd_init() */
	.max_blocks = 240,	/* derivation in the comment block above */
	.check_device = dasd_eckd_check_characteristics,
	.do_analysis = dasd_eckd_do_analysis,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,	/* generic start/terminate */
	.term_IO = dasd_term_IO,
	.format_device = dasd_eckd_format_device,
	.examine_error = dasd_eckd_examine_error,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.build_cp = dasd_eckd_build_cp,	/* request -> channel program */
	.free_cp = dasd_eckd_free_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.fill_info = dasd_eckd_fill_info,
};
1641
/*
 * Module initialization: register the ECKD-specific ioctls, convert the
 * discipline name to EBCDIC and register the ccw driver.  On driver
 * registration failure all ioctl registrations are rolled back.
 */
static int __init
dasd_eckd_init(void)
{
	int ret;

	/* Register ioctls before devices can bind to the driver. */
	dasd_ioctl_no_register(THIS_MODULE, BIODASDGATTR,
			       dasd_eckd_get_attrib);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDSATTR,
			       dasd_eckd_set_attrib);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDPSRD,
			       dasd_eckd_performance);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDRLSE,
			       dasd_eckd_release);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDRSRV,
			       dasd_eckd_reserve);
	dasd_ioctl_no_register(THIS_MODULE, BIODASDSLCK,
			       dasd_eckd_steal_lock);

	/* Convert the 4 byte discipline name to EBCDIC, once. */
	ASCEBC(dasd_eckd_discipline.ebcname, 4);

	ret = ccw_driver_register(&dasd_eckd_driver);
	if (ret) {
		/* Roll back every ioctl registration from above. */
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
					 dasd_eckd_get_attrib);
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
					 dasd_eckd_set_attrib);
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
					 dasd_eckd_performance);
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
					 dasd_eckd_release);
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
					 dasd_eckd_reserve);
		dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
					 dasd_eckd_steal_lock);
		return ret;
	}

	dasd_generic_auto_online(&dasd_eckd_driver);
	return 0;
}
1682
/*
 * Module teardown, in reverse order of dasd_eckd_init: unregister the
 * ccw driver first so no new requests arrive, then drop the ioctls.
 */
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);

	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDGATTR,
				 dasd_eckd_get_attrib);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSATTR,
				 dasd_eckd_set_attrib);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDPSRD,
				 dasd_eckd_performance);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRLSE,
				 dasd_eckd_release);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDRSRV,
				 dasd_eckd_reserve);
	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDSLCK,
				 dasd_eckd_steal_lock);
}
1701
1702module_init(dasd_eckd_init);
1703module_exit(dasd_eckd_cleanup);
1704
1705/*
1706 * Overrides for Emacs so that we follow Linus's tabbing style.
1707 * Emacs will notice this stuff at the end of the file and automatically
1708 * adjust the settings for this buffer only. This must remain at the end
1709 * of the file.
1710 * ---------------------------------------------------------------------------
1711 * Local variables:
1712 * c-indent-level: 4
1713 * c-brace-imaginary-offset: 0
1714 * c-brace-offset: -4
1715 * c-argdecl-indent: 4
1716 * c-label-offset: -4
1717 * c-continued-statement-offset: 4
1718 * c-continued-brace-offset: 0
1719 * indent-tabs-mode: 1
1720 * tab-width: 8
1721 * End:
1722 */
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
new file mode 100644
index 000000000000..b6888c68b224
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.h
@@ -0,0 +1,346 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_eckd.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Bugreports.to..: <Linux390@de.ibm.com>
6 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
7 *
8 * $Revision: 1.10 $
9 */
10
11#ifndef DASD_ECKD_H
12#define DASD_ECKD_H
13
14/*****************************************************************************
15 * SECTION: CCW Definitions
16 ****************************************************************************/
17#define DASD_ECKD_CCW_WRITE 0x05
18#define DASD_ECKD_CCW_READ 0x06
19#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
20#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
21#define DASD_ECKD_CCW_WRITE_KD 0x0d
22#define DASD_ECKD_CCW_READ_KD 0x0e
23#define DASD_ECKD_CCW_ERASE 0x11
24#define DASD_ECKD_CCW_READ_COUNT 0x12
25#define DASD_ECKD_CCW_SLCK 0x14
26#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
27#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
28#define DASD_ECKD_CCW_WRITE_CKD 0x1d
29#define DASD_ECKD_CCW_READ_CKD 0x1e
30#define DASD_ECKD_CCW_PSF 0x27
31#define DASD_ECKD_CCW_RSSD 0x3e
32#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
33#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
34#define DASD_ECKD_CCW_WRITE_MT 0x85
35#define DASD_ECKD_CCW_READ_MT 0x86
36#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
37#define DASD_ECKD_CCW_READ_KD_MT 0x8e
38#define DASD_ECKD_CCW_RELEASE 0x94
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_RESERVE 0xB4
42
/*
 * Perform Subsystem Function / Sub-Orders
 */
47
48/*****************************************************************************
49 * SECTION: Type Definitions
50 ****************************************************************************/
51
/* ECKD count field: track address (CCHH), record number, key length
 * and data length.  Packed: matches the on-device format. */
struct eckd_count {
	__u16 cyl;
	__u16 head;
	__u8 record;
	__u8 kl;	/* key length */
	__u16 dl;	/* data length */
} __attribute__ ((packed));
59
/* Cylinder/head track address. */
struct ch_t {
	__u16 cyl;
	__u16 head;
} __attribute__ ((packed));
64
/* Cylinder/head/sector address. */
struct chs_t {
	__u16 cyl;
	__u16 head;
	__u32 sector;
} __attribute__ ((packed));
70
/* Cylinder/head/record address. */
struct chr_t {
	__u16 cyl;
	__u16 head;
	__u8 record;
} __attribute__ ((packed));
76
/* Device geometry; same layout as struct chs_t. */
struct geom_t {
	__u16 cyl;
	__u16 head;
	__u32 sector;
} __attribute__ ((packed));
82
/* ECKD home address record layout (packed, hardware format). */
struct eckd_home {
	__u8 skip_control[14];
	__u16 cell_number;
	__u8 physical_addr[3];
	__u8 flag;
	struct ch_t track_addr;
	__u8 reserved;
	__u8 key_length;
	__u8 reserved2[2];
} __attribute__ ((packed));
93
/* Parameter data for the Define Extent CCW (DASD_ECKD_CCW_DEFINE_EXTENT).
 * Packed bit-for-bit to the channel command format. */
struct DE_eckd_data {
	struct {
		unsigned char perm:2;	/* Permissions on this extent */
		unsigned char reserved:1;
		unsigned char seek:2;	/* Seek control */
		unsigned char auth:2;	/* Access authorization */
		unsigned char pci:1;	/* PCI Fetch mode */
	} __attribute__ ((packed)) mask;
	struct {
		unsigned char mode:2;	/* Architecture mode */
		unsigned char ckd:1;	/* CKD Conversion */
		unsigned char operation:3;	/* Operation mode */
		unsigned char cfw:1;	/* Cache fast write */
		unsigned char dfw:1;	/* DASD fast write */
	} __attribute__ ((packed)) attributes;
	__u16 blk_size;		/* Blocksize */
	__u16 fast_write_id;
	__u8 ga_additional;	/* Global Attributes Additional */
	__u8 ga_extended;	/* Global Attributes Extended	*/
	struct ch_t beg_ext;	/* first track of the extent */
	struct ch_t end_ext;	/* last track of the extent */
	unsigned long long ep_sys_time; /* Ext Parameter - System Time Stamp */
	__u8 ep_format;		/* Extended Parameter format byte	*/
	__u8 ep_prio;		/* Extended Parameter priority I/O byte */
	__u8 ep_reserved[6];	/* Extended Parameter Reserved		*/
} __attribute__ ((packed));
120
/* Parameter data for the Locate Record CCW (DASD_ECKD_CCW_LOCATE_RECORD).
 * Packed bit-for-bit to the channel command format. */
struct LO_eckd_data {
	struct {
		unsigned char orientation:2;
		unsigned char operation:6;
	} __attribute__ ((packed)) operation;
	struct {
		unsigned char last_bytes_used:1;
		unsigned char reserved:6;
		unsigned char read_count_suffix:1;
	} __attribute__ ((packed)) auxiliary;
	__u8 unused;
	__u8 count;		/* number of records to operate on */
	struct ch_t seek_addr;
	struct chr_t search_arg;
	__u8 sector;
	__u16 length;		/* transfer length factor */
} __attribute__ ((packed));
138
/* Read Device Characteristics (RDC) data for an ECKD device, as stored
 * in dasd_eckd_private.rdc_data and copied out via dasd_eckd_fill_info.
 * Packed: mirrors the hardware-returned record exactly. */
struct dasd_eckd_characteristics {
	__u16 cu_type;		/* control unit type */
	struct {
		unsigned char support:2;
		unsigned char async:1;
		unsigned char reserved:1;
		unsigned char cache_info:1;
		unsigned char model:3;
	} __attribute__ ((packed)) cu_model;
	__u16 dev_type;
	__u8 dev_model;
	struct {
		unsigned char mult_burst:1;
		unsigned char RT_in_LR:1;
		unsigned char reserved1:1;
		unsigned char RD_IN_LR:1;
		unsigned char reserved2:4;
		unsigned char reserved3:8;
		unsigned char defect_wr:1;
		unsigned char XRC_supported:1;
		unsigned char reserved4:1;
		unsigned char striping:1;
		unsigned char reserved5:4;
		unsigned char cfw:1;	/* cache fast write */
		unsigned char reserved6:2;
		unsigned char cache:1;
		unsigned char dual_copy:1;
		unsigned char dfw:1;	/* DASD fast write */
		unsigned char reset_alleg:1;
		unsigned char sense_down:1;
	} __attribute__ ((packed)) facilities;
	__u8 dev_class;
	__u8 unit_type;
	__u16 no_cyl;		/* number of cylinders */
	__u16 trk_per_cyl;	/* tracks per cylinder */
	__u8 sec_per_trk;	/* sectors per track */
	__u8 byte_per_track[3];
	__u16 home_bytes;
	__u8 formula;		/* selects which factors union member applies */
	union {
		struct {
			__u8 f1;
			__u16 f2;
			__u16 f3;
		} __attribute__ ((packed)) f_0x01;
		struct {
			__u8 f1;
			__u8 f2;
			__u8 f3;
			__u8 f4;
			__u8 f5;
		} __attribute__ ((packed)) f_0x02;
	} __attribute__ ((packed)) factors;
	__u16 first_alt_trk;	/* alternate tracks */
	__u16 no_alt_trk;
	__u16 first_dia_trk;	/* diagnostic tracks */
	__u16 no_dia_trk;
	__u16 first_sup_trk;
	__u16 no_sup_trk;
	__u8 MDR_ID;
	__u8 OBR_ID;
	__u8 director;
	__u8 rd_trk_set;
	__u16 max_rec_zero;
	__u8 reserved1;
	__u8 RWANY_in_LR;
	__u8 factor6;
	__u8 factor7;
	__u8 factor8;
	__u8 reserved2[3];
	__u8 reserved3[10];
} __attribute__ ((packed));
211
/* Configuration data returned by the device, as stored in
 * dasd_eckd_private.conf_data and copied out via dasd_eckd_fill_info:
 * four decoded "ned" records, three raw ones, and a trailing "neq"
 * record.  (NED/NEQ presumably stand for node element descriptor /
 * qualifier -- verify against the IBM storage subsystem reference.)
 * Packed: mirrors the hardware-returned data exactly. */
struct dasd_eckd_confdata {
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 dev_class;
		__u8 reserved;
		unsigned char dev_type[6];
		unsigned char dev_model[3];
		unsigned char HDA_manufacturer[3];
		unsigned char HDA_location[2];
		unsigned char HDA_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned1;
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char dev_type[6];
		unsigned char dev_model[3];
		unsigned char DASD_manufacturer[3];
		unsigned char DASD_location[2];
		unsigned char DASD_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned2;
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char cont_type[6];
		unsigned char cont_model[3];
		unsigned char cont_manufacturer[3];
		unsigned char cont_location[2];
		unsigned char cont_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned3;
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char token_id:1;
			unsigned char sno_valid:1;
			unsigned char subst_sno:1;
			unsigned char recNED:1;
			unsigned char emuNED:1;
			unsigned char reserved:1;
		} __attribute__ ((packed)) flags;
		__u8 descriptor;
		__u8 reserved[2];
		unsigned char cont_type[6];
		unsigned char empty[3];
		unsigned char cont_manufacturer[3];
		unsigned char cont_location[2];
		unsigned char cont_seqno[12];
		__u16 ID;
	} __attribute__ ((packed)) ned4;
	/* ned5..ned7 are kept raw; not interpreted by this driver. */
	unsigned char ned5[32];
	unsigned char ned6[32];
	unsigned char ned7[32];
	struct {
		struct {
			unsigned char identifier:2;
			unsigned char reserved:6;
		} __attribute__ ((packed)) flags;
		__u8 selector;
		__u16 interfaceID;
		__u32 reserved;
		__u16 subsystemID;
		struct {
			unsigned char sp0:1;
			unsigned char sp1:1;
			unsigned char reserved:5;
			unsigned char scluster:1;
		} __attribute__ ((packed)) spathID;
		__u8 unit_address;
		__u8 dev_ID;
		__u8 dev_address;
		__u8 adapterID;
		__u16 link_address;
		struct {
			unsigned char parallel:1;
			unsigned char escon:1;
			unsigned char reserved:1;
			unsigned char ficon:1;
			unsigned char reserved2:4;
		} __attribute__ ((packed)) protocol_type;
		struct {
			unsigned char PID_in_236:1;
			unsigned char reserved:7;
		} __attribute__ ((packed)) format_flags;
		__u8 log_dev_address;
		unsigned char reserved2[12];
	} __attribute__ ((packed)) neq;
} __attribute__ ((packed));
328
/* Channel path masks for a device; ppm is used as the preferred path
 * mask when building channel programs (see cqr->lpm = path_data.ppm in
 * dasd_eckd_build_cp).  opm/npm semantics: presumably online and
 * non-preferred path masks -- confirm against dasd_eckd.c users. */
struct dasd_eckd_path {
	__u8 opm;
	__u8 ppm;
	__u8 npm;
};
334
335/*
336 * Perform Subsystem Function - Prepare for Read Subsystem Data
337 */
/*
 * Perform Subsystem Function - Prepare for Read Subsystem Data.
 * Parameter block for the DASD_ECKD_CCW_PSF command with order
 * PSF_ORDER_PRSSD (see dasd_eckd_performance).
 */
struct dasd_psf_prssd_data {
	unsigned char order;		/* PSF_ORDER_PRSSD */
	unsigned char flags;
	unsigned char reserved[4];
	unsigned char suborder;
	unsigned char varies[9];
} __attribute__ ((packed));
345
346#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 000000000000..7cb98d25f341
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,254 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * $Revision: 1.14 $
11 */
12
13#include <linux/config.h>
14#include <linux/ctype.h>
15#include <linux/init.h>
16
17#include <asm/debug.h>
18#include <asm/ebcdic.h>
19#include <asm/uaccess.h>
20
21/* This is ugly... */
22#define PRINTK_HEADER "dasd_erp:"
23
24#include "dasd_int.h"
25
/*
 * Allocate a ccw request for error recovery from the per-device ERP
 * chunk pool. The request, its channel program (cplength ccws) and its
 * data area (datasize bytes) are carved out of one chunk. Takes a
 * reference on the device; dasd_free_erp_request drops it again.
 * Returns the request or ERR_PTR(-ENOMEM).
 */
struct dasd_ccw_req *
dasd_alloc_erp_request(char *magic, int cplength, int datasize,
		       struct dasd_device * device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	if ( magic == NULL || datasize > PAGE_SIZE ||
	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	/* Round the request structure up to a multiple of 8 bytes. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	/* mem_lock protects the erp_chunks list. */
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->erp_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	/* Channel program and data area follow the aligned request struct. */
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	/* Store the magic in EBCDIC as the hardware/debug tools expect. */
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
70
71void
72dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
73{
74 unsigned long flags;
75
76 spin_lock_irqsave(&device->mem_lock, flags);
77 dasd_free_chunk(&device->erp_chunks, cqr);
78 spin_unlock_irqrestore(&device->mem_lock, flags);
79 atomic_dec(&device->ref_count);
80}
81
82
83/*
84 * dasd_default_erp_action just retries the current cqr
85 */
86struct dasd_ccw_req *
87dasd_default_erp_action(struct dasd_ccw_req * cqr)
88{
89 struct dasd_device *device;
90
91 device = cqr->device;
92
93 /* just retry - there is nothing to save ... I got no sense data.... */
94 if (cqr->retries > 0) {
95 DEV_MESSAGE (KERN_DEBUG, device,
96 "default ERP called (%i retries left)",
97 cqr->retries);
98 cqr->lpm = LPM_ANYPATH;
99 cqr->status = DASD_CQR_QUEUED;
100 } else {
101 DEV_MESSAGE (KERN_WARNING, device, "%s",
102 "default ERP called (NO retry left)");
103 cqr->status = DASD_CQR_FAILED;
104 cqr->stopclk = get_clock ();
105 }
106 return cqr;
107} /* end dasd_default_erp_action */
108
109/*
110 * DESCRIPTION
111 * Frees all ERPs of the current ERP Chain and set the status
112 * of the original CQR either to DASD_CQR_DONE if ERP was successful
113 * or to DASD_CQR_FAILED if ERP was NOT successful.
114 * NOTE: This function is only called if no discipline postaction
115 * is available
116 *
117 * PARAMETER
118 * erp current erp_head
119 *
120 * RETURN VALUES
121 * cqr pointer to the original CQR
122 */
123struct dasd_ccw_req *
124dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
125{
126 struct dasd_device *device;
127 int success;
128
129 if (cqr->refers == NULL || cqr->function == NULL)
130 BUG();
131
132 device = cqr->device;
133 success = cqr->status == DASD_CQR_DONE;
134
135 /* free all ERPs - but NOT the original cqr */
136 while (cqr->refers != NULL) {
137 struct dasd_ccw_req *refers;
138
139 refers = cqr->refers;
140 /* remove the request from the device queue */
141 list_del(&cqr->list);
142 /* free the finished erp request */
143 dasd_free_erp_request(cqr, device);
144 cqr = refers;
145 }
146
147 /* set corresponding status to original cqr */
148 if (success)
149 cqr->status = DASD_CQR_DONE;
150 else {
151 cqr->status = DASD_CQR_FAILED;
152 cqr->stopclk = get_clock();
153 }
154
155 return cqr;
156
157} /* end default_erp_postaction */
158
159/*
160 * Print the hex dump of the memory used by a request. This includes
161 * all error recovery ccws that have been chained in from of the
162 * real request.
163 */
164static inline void
165hex_dump_memory(struct dasd_device *device, void *data, int len)
166{
167 int *pint;
168
169 pint = (int *) data;
170 while (len > 0) {
171 DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x",
172 pint, pint[0], pint[1], pint[2], pint[3]);
173 pint += 4;
174 len -= 16;
175 }
176}
177
178void
179dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
180{
181 struct dasd_device *device;
182
183 device = cqr->device;
184 /* dump sense data */
185 if (device->discipline && device->discipline->dump_sense)
186 device->discipline->dump_sense(device, cqr, irb);
187}
188
189void
190dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa)
191{
192 struct dasd_device *device;
193 struct dasd_ccw_req *lcqr;
194 struct ccw1 *ccw;
195 int cplength;
196
197 device = cqr->device;
198 /* log the channel program */
199 for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) {
200 DEV_MESSAGE(KERN_ERR, device,
201 "(%s) ERP chain report for req: %p",
202 caller == 0 ? "EXAMINE" : "ACTION", lcqr);
203 hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req));
204
205 cplength = 1;
206 ccw = lcqr->cpaddr;
207 while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC))
208 cplength++;
209
210 if (cplength > 40) { /* log only parts of the CP */
211 DEV_MESSAGE(KERN_ERR, device, "%s",
212 "Start of channel program:");
213 hex_dump_memory(device, lcqr->cpaddr,
214 40*sizeof(struct ccw1));
215
216 DEV_MESSAGE(KERN_ERR, device, "%s",
217 "End of channel program:");
218 hex_dump_memory(device, lcqr->cpaddr + cplength - 10,
219 10*sizeof(struct ccw1));
220 } else { /* log the whole CP */
221 DEV_MESSAGE(KERN_ERR, device, "%s",
222 "Channel program (complete):");
223 hex_dump_memory(device, lcqr->cpaddr,
224 cplength*sizeof(struct ccw1));
225 }
226
227 if (lcqr != cqr)
228 continue;
229
230 /*
231 * Log bytes arround failed CCW but only if we did
232 * not log the whole CP of the CCW is outside the
233 * logged CP.
234 */
235 if (cplength > 40 ||
236 ((addr_t) cpa < (addr_t) lcqr->cpaddr &&
237 (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) {
238
239 DEV_MESSAGE(KERN_ERR, device,
240 "Failed CCW (%p) (area):",
241 (void *) (long) cpa);
242 hex_dump_memory(device, cqr->cpaddr - 10,
243 20*sizeof(struct ccw1));
244 }
245 }
246
247} /* end log_erp_chain */
248
/* Exported for use by the dasd discipline modules (eckd/fba). */
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
EXPORT_SYMBOL(dasd_log_ccw);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
new file mode 100644
index 000000000000..7963ae343eef
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.c
@@ -0,0 +1,607 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 *
7 * $Revision: 1.39 $
8 */
9
10#include <linux/config.h>
11#include <linux/stddef.h>
12#include <linux/kernel.h>
13#include <asm/debug.h>
14
15#include <linux/slab.h>
16#include <linux/hdreg.h> /* HDIO_GETGEO */
17#include <linux/bio.h>
18#include <linux/module.h>
19#include <linux/init.h>
20
21#include <asm/idals.h>
22#include <asm/ebcdic.h>
23#include <asm/io.h>
24#include <asm/todclk.h>
25#include <asm/ccwdev.h>
26
27#include "dasd_int.h"
28#include "dasd_fba.h"
29
30#ifdef PRINTK_HEADER
31#undef PRINTK_HEADER
32#endif /* PRINTK_HEADER */
33#define PRINTK_HEADER "dasd(fba):"
34
35#define DASD_FBA_CCW_WRITE 0x41
36#define DASD_FBA_CCW_READ 0x42
37#define DASD_FBA_CCW_LOCATE 0x43
38#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
39
40MODULE_LICENSE("GPL");
41
42static struct dasd_discipline dasd_fba_discipline;
43
/* Per-device private data of the FBA discipline. */
struct dasd_fba_private {
	struct dasd_fba_characteristics rdc_data;	/* read device characteristics */
};
47
48static struct ccw_device_id dasd_fba_ids[] = {
49 { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), driver_info: 0x1},
50 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), driver_info: 0x2},
51 { /* end of list */ },
52};
53
54MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
55
56static struct ccw_driver dasd_fba_driver; /* see below */
57static int
58dasd_fba_probe(struct ccw_device *cdev)
59{
60 int ret;
61
62 ret = dasd_generic_probe (cdev, &dasd_fba_discipline);
63 if (ret)
64 return ret;
65 ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
66 return 0;
67}
68
/* set_online callback: delegate to the generic dasd code with our discipline. */
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online (cdev, &dasd_fba_discipline);
}
74
/* CCW bus driver glue; most callbacks come from the generic dasd layer. */
static struct ccw_driver dasd_fba_driver = {
	.name        = "dasd-fba",
	.owner       = THIS_MODULE,
	.ids         = dasd_fba_ids,
	.probe       = dasd_fba_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_fba_set_online,
	.notify      = dasd_generic_notify,
};
85
86static inline void
87define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
88 int blksize, int beg, int nr)
89{
90 ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
91 ccw->flags = 0;
92 ccw->count = 16;
93 ccw->cda = (__u32) __pa(data);
94 memset(data, 0, sizeof (struct DE_fba_data));
95 if (rw == WRITE)
96 (data->mask).perm = 0x0;
97 else if (rw == READ)
98 (data->mask).perm = 0x1;
99 else
100 data->mask.perm = 0x2;
101 data->blk_size = blksize;
102 data->ext_loc = beg;
103 data->ext_end = nr - 1;
104}
105
106static inline void
107locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
108 int block_nr, int block_ct)
109{
110 ccw->cmd_code = DASD_FBA_CCW_LOCATE;
111 ccw->flags = 0;
112 ccw->count = 8;
113 ccw->cda = (__u32) __pa(data);
114 memset(data, 0, sizeof (struct LO_fba_data));
115 if (rw == WRITE)
116 data->operation.cmd = 0x5;
117 else if (rw == READ)
118 data->operation.cmd = 0x6;
119 else
120 data->operation.cmd = 0x8;
121 data->blk_nr = block_nr;
122 data->blk_ct = block_ct;
123}
124
/*
 * Allocate the FBA private data (on first call) and read the device
 * characteristics from the hardware into it. Returns 0 on success or
 * a negative error code.
 */
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
	struct dasd_fba_private *private;
	struct ccw_device *cdev = device->cdev;
	void *rdc_data;
	int rc;

	private = (struct dasd_fba_private *) device->private;
	if (private == NULL) {
		private = kmalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private "
				    "data");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	rc = read_dev_chars(device->cdev, &rdc_data, 32);
	if (rc) {
		/* NOTE(review): private stays allocated and attached to
		 * device->private on failure - presumably reused when the
		 * check is retried; confirm no leak on teardown. */
		DEV_MESSAGE(KERN_WARNING, device,
			    "Read device characteristics returned error %d",
			    rc);
		return rc;
	}

	/* Report type/model and approximate capacity in MB. */
	DEV_MESSAGE(KERN_INFO, device,
		    "%04X/%02X(CU:%04X/%02X) %dMB at(%d B/blk)",
		    cdev->id.dev_type,
		    cdev->id.dev_model,
		    cdev->id.cu_type,
		    cdev->id.cu_model,
		    ((private->rdc_data.blk_bdsa *
		      (private->rdc_data.blk_size >> 9)) >> 11),
		    private->rdc_data.blk_size);
	return 0;
}
165
166static int
167dasd_fba_do_analysis(struct dasd_device *device)
168{
169 struct dasd_fba_private *private;
170 int sb, rc;
171
172 private = (struct dasd_fba_private *) device->private;
173 rc = dasd_check_blocksize(private->rdc_data.blk_size);
174 if (rc) {
175 DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d",
176 private->rdc_data.blk_size);
177 return rc;
178 }
179 device->blocks = private->rdc_data.blk_bdsa;
180 device->bp_block = private->rdc_data.blk_size;
181 device->s2b_shift = 0; /* bits to shift 512 to get a block */
182 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
183 device->s2b_shift++;
184 return 0;
185}
186
187static int
188dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
189{
190 if (dasd_check_blocksize(device->bp_block) != 0)
191 return -EINVAL;
192 geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
193 geo->heads = 16;
194 geo->sectors = 128 >> device->s2b_shift;
195 return 0;
196}
197
198static dasd_era_t
199dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
200{
201 struct dasd_device *device;
202 struct ccw_device *cdev;
203
204 device = (struct dasd_device *) cqr->device;
205 if (irb->scsw.cstat == 0x00 &&
206 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
207 return dasd_era_none;
208
209 cdev = device->cdev;
210 switch (cdev->id.dev_type) {
211 case 0x3370:
212 return dasd_3370_erp_examine(cqr, irb);
213 case 0x9336:
214 return dasd_9336_erp_examine(cqr, irb);
215 default:
216 return dasd_era_recover;
217 }
218}
219
/* FBA has no device-specific recovery; always use the default ERP action. */
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_action;
}
225
226static dasd_erp_fn_t
227dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
228{
229 if (cqr->function == dasd_default_erp_action)
230 return dasd_default_erp_postaction;
231
232 DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p",
233 cqr->function);
234 return NULL;
235}
236
/*
 * Build the channel program for a block device request: one define
 * extent CCW, one or more locate record CCWs and one read/write CCW
 * per block. Devices without data chaining need a locate record in
 * front of every data CCW. Returns the filled request or an ERR_PTR.
 */
static struct dasd_ccw_req *
dasd_fba_build_cp(struct dasd_device * device, struct request *req)
{
	struct dasd_fba_private *private;
	unsigned long *idaws;
	struct LO_fba_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct bio *bio;
	struct bio_vec *bv;
	char *dst;
	int count, cidaw, cplength, datasize;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char cmd;
	int i;

	private = (struct dasd_fba_private *) device->private;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_FBA_CCW_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_FBA_CCW_WRITE;
	} else
		return ERR_PTR(-EINVAL);
	blksize = device->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = req->sector >> device->s2b_shift;
	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_bio(bio, req) {
		bio_for_each_segment(bv, bio, i) {
			if (bv->bv_len & (blksize - 1))
				/* Fba can only do full blocks. */
				return ERR_PTR(-EINVAL);
			count += bv->bv_len >> (device->s2b_shift + 9);
#if defined(CONFIG_ARCH_S390X)
			/* 64 bit: count indirect address words for >2G pages. */
			if (idal_is_needed (page_address(bv->bv_page),
					    bv->bv_len))
				cidaw += bv->bv_len / blksize;
#endif
		}
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* 1x define extent + 1x locate record + number of blocks */
	cplength = 2 + count;
	/* 1x define extent + 1x locate record */
	datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
		cidaw * sizeof(unsigned long);
	/*
	 * Find out number of additional locate record ccws if the device
	 * can't do data chaining.
	 */
	if (private->rdc_data.mode.bits.data_chain == 0) {
		cplength += count - 1;
		datasize += (count - 1)*sizeof(struct LO_fba_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_fba_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent. */
	define_extent(ccw++, cqr->data, rq_data_dir(req),
		      device->bp_block, req->sector, req->nr_sectors);
	/* Build locate_record + read/write ccws. */
	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
	LO_data = (struct LO_fba_data *) (idaws + cidaw);
	/* Locate record for all blocks for smart devices. */
	if (private->rdc_data.mode.bits.data_chain != 0) {
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
	}
	recid = first_rec;
	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		/* Optionally bounce the data through the dasd page cache. */
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      SLAB_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Locate record for stupid devices. */
			if (private->rdc_data.mode.bits.data_chain == 0) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw, LO_data++,
					      rq_data_dir(req),
					      recid - first_rec, 1);
				ccw->flags = CCW_FLAG_CC;
				ccw++;
			} else {
				/* Data-chain all but the first data CCW. */
				if (recid > first_rec)
					ccw[-1].flags |= CCW_FLAG_DC;
				else
					ccw[-1].flags |= CCW_FLAG_CC;
			}
			ccw->cmd_code = cmd;
			ccw->count = device->bp_block;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	cqr->device = device;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
360
/*
 * Undo dasd_fba_build_cp: when the data went through the dasd page
 * cache, copy read data back into the request buffers and release the
 * cache pages, then free the ccw request. Returns 1 if the request
 * completed with DASD_CQR_DONE, 0 otherwise.
 */
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_fba_private *private;
	struct ccw1 *ccw;
	struct bio *bio;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, off;
	int i, status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_fba_private *) cqr->device->private;
	blksize = cqr->device->bp_block;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->rdc_data.mode.bits.data_chain != 0)
		ccw++;
	/* Walk the CCWs in the same order build_cp created them. */
	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->rdc_data.mode.bits.data_chain == 0)
				ccw++;
			/* Only the first CCW of a segment is inspected;
			 * dst is cleared afterwards. */
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* Data was bounced: copy back and free. */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->device);
	return status;
}
408
409static int
410dasd_fba_fill_info(struct dasd_device * device,
411 struct dasd_information2_t * info)
412{
413 info->label_block = 1;
414 info->FBA_layout = 1;
415 info->format = DASD_FORMAT_LDL;
416 info->characteristics_size = sizeof(struct dasd_fba_characteristics);
417 memcpy(info->characteristics,
418 &((struct dasd_fba_private *) device->private)->rdc_data,
419 sizeof (struct dasd_fba_characteristics));
420 info->confdata_size = 0;
421 return 0;
422}
423
/*
 * Print an I/O status report to the kernel log after an error: device
 * and subchannel status, the failing CCW address, the sense bytes (if
 * any) and excerpts of the channel program (head, the area around the
 * failing CCW, and tail).
 */
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
		    struct irb *irb)
{
	char *page;
	struct ccw1 *act, *end, *last;
	int len, sl, sct, count;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DEV_MESSAGE(KERN_ERR, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      device->cdev->dev.bus_id);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
		       irb->scsw.cstat, irb->scsw.dstat);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       device->cdev->dev.bus_id,
		       (void *) (addr_t) irb->scsw.cpa);
	/* 32 sense bytes, printed as four lines of eight. */
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
	} else {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " SORRY - NO VALID SENSE AVAILABLE\n");
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));

	/* dump the Channel Program */
	/* print first CCWs (maximum 8) */
	act = req->cpaddr;
	/* last = final CCW of the chain (no chaining flags set). */
	for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
	end = min(act + 8, last);
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " Related CP in req: %p\n", req);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		/* up to 32 bytes of the CCW's data area */
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	MESSAGE_LOG(KERN_ERR, "%s",
		    page + sizeof(KERN_ERR PRINTK_HEADER));


	/* print failing CCW area */
	len = 0;
	/* Skip ahead to two CCWs before the failing one, marking the gap. */
	if (act < ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2) {
		act = ((struct ccw1 *)(addr_t) irb->scsw.cpa) - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	end = min((struct ccw1 *)(addr_t) irb->scsw.cpa + 2, last);
	while (act <= end) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}

	/* print last CCWs */
	if (act < last - 2) {
		act = last - 2;
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
	}
	while (act <= last) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       act, ((int *) act)[0], ((int *) act)[1]);
		for (count = 0; count < 32 && count < act->count;
		     count += sizeof(int))
			len += sprintf(page + len, " %08X",
				       ((int *) (addr_t) act->cda)
				       [(count>>2)]);
		len += sprintf(page + len, "\n");
		act++;
	}
	if (len > 0)
		MESSAGE_LOG(KERN_ERR, "%s",
			    page + sizeof(KERN_ERR PRINTK_HEADER));
	free_page((unsigned long) page);
}
532
533/*
534 * max_blocks is dependent on the amount of storage that is available
535 * in the static io buffer for each device. Currently each device has
536 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
537 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
538 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
539 * addition we have one define extent ccw + 16 bytes of data and a
540 * locate record ccw for each block (stupid devices!) + 16 bytes of data.
541 * That makes:
542 * (8192 - 24 - 136 - 8 - 16) / 40 = 200.2 blocks at maximum.
543 * We want to fit two into the available memory so that we can immediately
544 * start the next request if one finishes off. That makes 100.1 blocks
545 * for one request. Give a little safety and the result is 96.
546 */
/* Discipline operations table; see the max_blocks calculation above. */
static struct dasd_discipline dasd_fba_discipline = {
	.owner = THIS_MODULE,
	.name = "FBA ",
	.ebcname = "FBA ",	/* converted to EBCDIC in dasd_fba_init */
	.max_blocks = 96,
	.check_device = dasd_fba_check_characteristics,
	.do_analysis = dasd_fba_do_analysis,
	.fill_geometry = dasd_fba_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.examine_error = dasd_fba_examine_error,
	.erp_action = dasd_fba_erp_action,
	.erp_postaction = dasd_fba_erp_postaction,
	.build_cp = dasd_fba_build_cp,
	.free_cp = dasd_fba_free_cp,
	.dump_sense = dasd_fba_dump_sense,
	.fill_info = dasd_fba_fill_info,
};
565
566static int __init
567dasd_fba_init(void)
568{
569 int ret;
570
571 ASCEBC(dasd_fba_discipline.ebcname, 4);
572
573 ret = ccw_driver_register(&dasd_fba_driver);
574 if (ret)
575 return ret;
576
577 dasd_generic_auto_online(&dasd_fba_driver);
578 return 0;
579}
580
/* Module exit: unregister the ccw driver. */
static void __exit
dasd_fba_cleanup(void)
{
	ccw_driver_unregister(&dasd_fba_driver);
}
586
587module_init(dasd_fba_init);
588module_exit(dasd_fba_cleanup);
589
590/*
591 * Overrides for Emacs so that we follow Linus's tabbing style.
592 * Emacs will notice this stuff at the end of the file and automatically
593 * adjust the settings for this buffer only. This must remain at the end
594 * of the file.
595 * ---------------------------------------------------------------------------
596 * Local variables:
597 * c-indent-level: 4
598 * c-brace-imaginary-offset: 0
599 * c-brace-offset: -4
600 * c-argdecl-indent: 4
601 * c-label-offset: -4
602 * c-continued-statement-offset: 4
603 * c-continued-brace-offset: 0
604 * indent-tabs-mode: 1
605 * tab-width: 8
606 * End:
607 */
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
new file mode 100644
index 000000000000..624f0402ee22
--- /dev/null
+++ b/drivers/s390/block/dasd_fba.h
@@ -0,0 +1,73 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_fba.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
6 *
7 * $Revision: 1.6 $
8 */
9
10#ifndef DASD_FBA_H
11#define DASD_FBA_H
12
/* Parameter block for the FBA Define Extent CCW (16 bytes, see define_extent). */
struct DE_fba_data {
	struct {
		unsigned char perm:2;	/* Permissions on this extent */
		unsigned char zero:2;	/* Must be zero */
		unsigned char da:1;	/* usually zero */
		unsigned char diag:1;	/* allow diagnose */
		unsigned char zero2:2;	/* zero */
	} __attribute__ ((packed)) mask;
	__u8 zero;		/* Must be zero */
	__u16 blk_size;		/* Blocksize */
	__u32 ext_loc;		/* Extent locator */
	__u32 ext_beg;		/* logical number of block 0 in extent */
	__u32 ext_end;		/* logical number of last block in extent */
} __attribute__ ((packed));
27
/* Parameter block for the FBA Locate CCW (8 bytes, see locate_record). */
struct LO_fba_data {
	struct {
		unsigned char zero:4;	/* must be zero */
		unsigned char cmd:4;	/* operation code */
	} __attribute__ ((packed)) operation;
	__u8 auxiliary;
	__u16 blk_ct;		/* number of blocks */
	__u32 blk_nr;		/* first block, relative to the extent */
} __attribute__ ((packed));
37
/*
 * Read Device Characteristics data of an FBA device; filled by
 * read_dev_chars (32 bytes) in dasd_fba_check_characteristics.
 */
struct dasd_fba_characteristics {
	union {
		__u8 c;
		struct {
			unsigned char reserved:1;
			unsigned char overrunnable:1;
			unsigned char burst_byte:1;
			unsigned char data_chain:1;	/* device supports data chaining */
			unsigned char zeros:4;
		} __attribute__ ((packed)) bits;
	} __attribute__ ((packed)) mode;
	union {
		__u8 c;
		struct {
			unsigned char zero0:1;
			unsigned char removable:1;
			unsigned char shared:1;
			unsigned char zero1:1;
			unsigned char mam:1;
			unsigned char zeros:3;
		} __attribute__ ((packed)) bits;
	} __attribute__ ((packed)) features;
	__u8 dev_class;
	__u8 unit_type;
	__u16 blk_size;		/* block size in bytes */
	__u32 blk_per_cycl;
	__u32 blk_per_bound;
	__u32 blk_bdsa;		/* number of blocks on the device */
	__u32 reserved0;
	__u16 reserved1;
	__u16 blk_ce;
	__u32 reserved2;
	__u16 reserved3;
} __attribute__ ((packed));
72
73#endif /* DASD_FBA_H */
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
new file mode 100644
index 000000000000..1d52db406b2e
--- /dev/null
+++ b/drivers/s390/block/dasd_genhd.c
@@ -0,0 +1,185 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_genhd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * gendisk related functions for the dasd driver.
11 *
12 * $Revision: 1.48 $
13 */
14
15#include <linux/config.h>
16#include <linux/interrupt.h>
17#include <linux/fs.h>
18#include <linux/blkpg.h>
19
20#include <asm/uaccess.h>
21
22/* This is ugly... */
23#define PRINTK_HEADER "dasd_gendisk:"
24
25#include "dasd_int.h"
26
27/*
28 * Allocate and register gendisk structure for device.
29 */
/*
 * Allocate a gendisk for the device, derive its dasd name from the
 * device index (base-26 encoding, see table below), register its
 * devfs entry and add the disk with capacity 0 (set later by the
 * analysis). Returns 0 or a negative error code.
 */
int
dasd_gendisk_alloc(struct dasd_device *device)
{
	struct gendisk *gdp;
	int len;

	/* Make sure the minor for this device exists. */
	if (device->devindex >= DASD_PER_MAJOR)
		return -EBUSY;

	gdp = alloc_disk(1 << DASD_PARTN_BITS);
	if (!gdp)
		return -ENOMEM;

	/* Initialize gendisk structure. */
	gdp->major = DASD_MAJOR;
	gdp->first_minor = device->devindex << DASD_PARTN_BITS;
	gdp->fops = &dasd_device_operations;
	gdp->driverfs_dev = &device->cdev->dev;

	/*
	 * Set device name.
	 *   dasda - dasdz : 26 devices
	 *   dasdaa - dasdzz : 676 devices, added up = 702
	 *   dasdaaa - dasdzzz : 17576 devices, added up = 18278
	 *   dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
	 */
	len = sprintf(gdp->disk_name, "dasd");
	/* Emit the higher-order base-26 "digits" only when needed. */
	if (device->devindex > 25) {
		if (device->devindex > 701) {
			if (device->devindex > 18277)
				len += sprintf(gdp->disk_name + len, "%c",
					       'a'+(((device->devindex-18278)
						     /17576)%26));
			len += sprintf(gdp->disk_name + len, "%c",
				       'a'+(((device->devindex-702)/676)%26));
		}
		len += sprintf(gdp->disk_name + len, "%c",
			       'a'+(((device->devindex-26)/26)%26));
	}
	len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26));

	sprintf(gdp->devfs_name, "dasd/%s", device->cdev->dev.bus_id);

	if (test_bit(DASD_FLAG_RO, &device->flags))
		set_disk_ro(gdp, 1);
	gdp->private_data = device;
	gdp->queue = device->request_queue;
	device->gdp = gdp;
	/* Capacity stays 0 until the device analysis has run. */
	set_capacity(device->gdp, 0);
	add_disk(device->gdp);
	return 0;
}
83
84/*
85 * Unregister and free gendisk structure for device.
86 */
87void
88dasd_gendisk_free(struct dasd_device *device)
89{
90 del_gendisk(device->gdp);
91 device->gdp->queue = 0;
92 put_disk(device->gdp);
93 device->gdp = 0;
94}
95
96/*
97 * Trigger a partition detection.
98 */
/*
 * Publish the real capacity and trigger partition detection via the
 * BLKRRPART ioctl. The block device stays open until
 * dasd_destroy_partitions puts it. Returns 0 or -ENODEV if the disk
 * cannot be opened.
 */
int
dasd_scan_partitions(struct dasd_device * device)
{
	struct block_device *bdev;

	/* Make the disk known. */
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	bdev = bdget_disk(device->gdp, 0);
	if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
		return -ENODEV;
	/*
	 * See fs/partition/check.c:register_disk,rescan_partitions
	 * Can't call rescan_partitions directly. Use ioctl.
	 */
	ioctl_by_bdev(bdev, BLKRRPART, 0);
	/*
	 * Since the matching blkdev_put call to the blkdev_get in
	 * this function is not called before dasd_destroy_partitions
	 * the offline open_count limit needs to be increased from
	 * 0 to 1. This is done by setting device->bdev (see
	 * dasd_generic_set_offline). As long as the partition
	 * detection is running no offline should be allowed. That
	 * is why the assignment to device->bdev is done AFTER
	 * the BLKRRPART ioctl.
	 */
	device->bdev = bdev;
	return 0;
}
127
128/*
129 * Remove all inodes in the system for a device, delete the
130 * partitions and make device unusable by setting its size to zero.
131 */
132void
133dasd_destroy_partitions(struct dasd_device * device)
134{
135 /* The two structs have 168/176 byte on 31/64 bit. */
136 struct blkpg_partition bpart;
137 struct blkpg_ioctl_arg barg;
138 struct block_device *bdev;
139
140 /*
141 * Get the bdev pointer from the device structure and clear
142 * device->bdev to lower the offline open_count limit again.
143 */
144 bdev = device->bdev;
145 device->bdev = 0;
146
147 /*
148 * See fs/partition/check.c:delete_partition
149 * Can't call delete_partitions directly. Use ioctl.
150 * The ioctl also does locking and invalidation.
151 */
152 memset(&bpart, 0, sizeof(struct blkpg_partition));
153 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
154 barg.data = &bpart;
155 barg.op = BLKPG_DEL_PARTITION;
156 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
157 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
158
159 invalidate_partition(device->gdp, 0);
160 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
161 blkdev_put(bdev);
162 set_capacity(device->gdp, 0);
163}
164
165int
166dasd_gendisk_init(void)
167{
168 int rc;
169
170 /* Register to static dasd major 94 */
171 rc = register_blkdev(DASD_MAJOR, "dasd");
172 if (rc != 0) {
173 MESSAGE(KERN_WARNING,
174 "Couldn't register successfully to "
175 "major no %d", DASD_MAJOR);
176 return rc;
177 }
178 return 0;
179}
180
/* Release the static DASD block major again. */
void
dasd_gendisk_exit(void)
{
	unregister_blkdev(DASD_MAJOR, "dasd");
}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
new file mode 100644
index 000000000000..4586e0ecc526
--- /dev/null
+++ b/drivers/s390/block/dasd_int.h
@@ -0,0 +1,576 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_int.h
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
6 * Bugreports.to..: <Linux390@de.ibm.com>
7 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
8 *
9 * $Revision: 1.63 $
10 */
11
12#ifndef DASD_INT_H
13#define DASD_INT_H
14
15#ifdef __KERNEL__
16
17/* erp debugging in dasd.c and dasd_3990_erp.c */
18#define ERP_DEBUG
19
20
21/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
22#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
23#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
24
25/*
26 * States a dasd device can have:
27 * new: the dasd_device structure is allocated.
28 * known: the discipline for the device is identified.
29 * basic: the device can do basic i/o.
30 * accept: the device is analysed (format is known).
31 * ready: partition detection is done and the device is can do block io.
32 * online: the device accepts requests from the block device queue.
33 *
34 * Things to do for startup state transitions:
35 * new -> known: find discipline for the device and create devfs entries.
36 * known -> basic: request irq line for the device.
37 * basic -> ready: do the initial analysis, e.g. format detection,
38 * do block device setup and detect partitions.
39 * ready -> online: schedule the device tasklet.
40 * Things to do for shutdown state transitions:
41 * online -> ready: just set the new device state.
42 * ready -> basic: flush requests from the block device layer, clear
43 * partition information and reset format information.
44 * basic -> known: terminate all requests and free irq.
45 * known -> new: remove devfs entries and forget discipline.
46 */
47
48#define DASD_STATE_NEW 0
49#define DASD_STATE_KNOWN 1
50#define DASD_STATE_BASIC 2
51#define DASD_STATE_READY 3
52#define DASD_STATE_ONLINE 4
53
54#include <linux/module.h>
55#include <linux/wait.h>
56#include <linux/blkdev.h>
57#include <linux/devfs_fs_kernel.h>
58#include <linux/genhd.h>
59#include <linux/hdreg.h>
60#include <linux/interrupt.h>
61#include <asm/ccwdev.h>
62#include <linux/workqueue.h>
63#include <asm/debug.h>
64#include <asm/dasd.h>
65#include <asm/idals.h>
66
67/*
68 * SECTION: Type definitions
69 */
70struct dasd_device;
71
72typedef int (*dasd_ioctl_fn_t) (struct block_device *bdev, int no, long args);
73
74struct dasd_ioctl {
75 struct list_head list;
76 struct module *owner;
77 int no;
78 dasd_ioctl_fn_t handler;
79};
80
81typedef enum {
82 dasd_era_fatal = -1, /* no chance to recover */
83 dasd_era_none = 0, /* don't recover, everything alright */
84 dasd_era_msg = 1, /* don't recover, just report... */
85 dasd_era_recover = 2 /* recovery action recommended */
86} dasd_era_t;
87
88/* BIT DEFINITIONS FOR SENSE DATA */
89#define DASD_SENSE_BIT_0 0x80
90#define DASD_SENSE_BIT_1 0x40
91#define DASD_SENSE_BIT_2 0x20
92#define DASD_SENSE_BIT_3 0x10
93
94/*
95 * SECTION: MACROs for klogd and s390 debug feature (dbf)
96 */
97#define DBF_DEV_EVENT(d_level, d_device, d_str, d_data...) \
98do { \
99 debug_sprintf_event(d_device->debug_area, \
100 d_level, \
101 d_str "\n", \
102 d_data); \
103} while(0)
104
105#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
106do { \
107 debug_sprintf_exception(d_device->debug_area, \
108 d_level, \
109 d_str "\n", \
110 d_data); \
111} while(0)
112
113#define DBF_EVENT(d_level, d_str, d_data...)\
114do { \
115 debug_sprintf_event(dasd_debug_area, \
116 d_level,\
117 d_str "\n", \
118 d_data); \
119} while(0)
120
121#define DBF_EXC(d_level, d_str, d_data...)\
122do { \
123 debug_sprintf_exception(dasd_debug_area, \
124 d_level,\
125 d_str "\n", \
126 d_data); \
127} while(0)
128
129/* definition of dbf debug levels */
130#define DBF_EMERG 0 /* system is unusable */
131#define DBF_ALERT 1 /* action must be taken immediately */
132#define DBF_CRIT 2 /* critical conditions */
133#define DBF_ERR 3 /* error conditions */
134#define DBF_WARNING 4 /* warning conditions */
135#define DBF_NOTICE 5 /* normal but significant condition */
136#define DBF_INFO 6 /* informational */
137#define DBF_DEBUG 6 /* debug-level messages */
138
139/* messages to be written via klogd and dbf */
140#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
141do { \
142 printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
143 d_device->cdev->dev.bus_id, d_args); \
144 DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
145} while(0)
146
147#define MESSAGE(d_loglevel,d_string,d_args...)\
148do { \
149 printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
150 DBF_EVENT(DBF_ALERT, d_string, d_args); \
151} while(0)
152
153/* messages to be written via klogd only */
154#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
155do { \
156 printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
157 d_device->cdev->dev.bus_id, d_args); \
158} while(0)
159
160#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
161do { \
162 printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
163} while(0)
164
165struct dasd_ccw_req {
166 unsigned int magic; /* Eye catcher */
167 struct list_head list; /* list_head for request queueing. */
168
169 /* Where to execute what... */
170 struct dasd_device *device; /* device the request is for */
171 struct ccw1 *cpaddr; /* address of channel program */
172 char status; /* status of this request */
173 short retries; /* A retry counter */
174 unsigned long flags; /* flags of this request */
175
176 /* ... and how */
177 unsigned long starttime; /* jiffies time of request start */
178 int expires; /* expiration period in jiffies */
179 char lpm; /* logical path mask */
180 void *data; /* pointer to data area */
181
182 /* these are important for recovering erroneous requests */
183 struct irb irb; /* device status in case of an error */
184 struct dasd_ccw_req *refers; /* ERP-chain queueing. */
185 void *function; /* originating ERP action */
186
187 /* these are for statistics only */
188 unsigned long long buildclk; /* TOD-clock of request generation */
189 unsigned long long startclk; /* TOD-clock of request start */
190 unsigned long long stopclk; /* TOD-clock of request interrupt */
191 unsigned long long endclk; /* TOD-clock of request termination */
192
193 /* Callback that is called after reaching final status. */
194 void (*callback)(struct dasd_ccw_req *, void *data);
195 void *callback_data;
196};
197
198/*
199 * dasd_ccw_req -> status can be:
200 */
201#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
202#define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */
203#define DASD_CQR_IN_IO 0x02 /* request is currently in IO */
204#define DASD_CQR_DONE 0x03 /* request is completed successfully */
205#define DASD_CQR_ERROR 0x04 /* request is completed with error */
206#define DASD_CQR_FAILED 0x05 /* request is finally failed */
207#define DASD_CQR_CLEAR 0x06 /* request is clear pending */
208
209/* per dasd_ccw_req flags */
210#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
211
212/* Signature for error recovery functions. */
213typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
214
215/*
216 * the struct dasd_discipline is
217 * sth like a table of virtual functions, if you think of dasd_eckd
218 * inheriting dasd...
219 * no, currently we are not planning to reimplement the driver in C++
220 */
221struct dasd_discipline {
222 struct module *owner;
223 char ebcname[8]; /* a name used for tagging and printks */
224 char name[8]; /* a name used for tagging and printks */
225 int max_blocks; /* maximum number of blocks to be chained */
226
227 struct list_head list; /* used for list of disciplines */
228
229 /*
230 * Device recognition functions. check_device is used to verify
231 * the sense data and the information returned by read device
232 * characteristics. It returns 0 if the discipline can be used
233 * for the device in question.
234 * do_analysis is used in the step from device state "basic" to
235 * state "accept". It returns 0 if the device can be made ready,
236 * it returns -EMEDIUMTYPE if the device can't be made ready or
237 * -EAGAIN if do_analysis started a ccw that needs to complete
238 * before the analysis may be repeated.
239 */
240 int (*check_device)(struct dasd_device *);
241 int (*do_analysis) (struct dasd_device *);
242
243 /*
244 * Device operation functions. build_cp creates a ccw chain for
245 * a block device request, start_io starts the request and
246 * term_IO cancels it (e.g. in case of a timeout). format_device
247 * returns a ccw chain to be used to format the device.
248 */
249 struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
250 struct request *);
251 int (*start_IO) (struct dasd_ccw_req *);
252 int (*term_IO) (struct dasd_ccw_req *);
253 struct dasd_ccw_req *(*format_device) (struct dasd_device *,
254 struct format_data_t *);
255 int (*free_cp) (struct dasd_ccw_req *, struct request *);
256 /*
257 * Error recovery functions. examine_error() returns a value that
258 * indicates what to do for an error condition. If examine_error()
259 * returns 'dasd_era_recover' erp_action() is called to create a
260 * special error recovery ccw. erp_postaction() is called after
261 * an error recovery ccw has finished its execution. dump_sense
262 * is called for every error condition to print the sense data
263 * to the console.
264 */
265 dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
266 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
267 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
268 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
269 struct irb *);
270
271 /* i/o control functions. */
272 int (*fill_geometry) (struct dasd_device *, struct hd_geometry *);
273 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
274};
275
276extern struct dasd_discipline *dasd_diag_discipline_pointer;
277
278struct dasd_device {
279 /* Block device stuff. */
280 struct gendisk *gdp;
281 request_queue_t *request_queue;
282 spinlock_t request_queue_lock;
283 struct block_device *bdev;
284 unsigned int devindex;
285 unsigned long blocks; /* size of volume in blocks */
286 unsigned int bp_block; /* bytes per block */
287 unsigned int s2b_shift; /* log2 (bp_block/512) */
288 unsigned long flags; /* per device flags */
289
290 /* Device discipline stuff. */
291 struct dasd_discipline *discipline;
292 char *private;
293
294 /* Device state and target state. */
295 int state, target;
296 int stopped; /* device (ccw_device_start) was stopped */
297
298 /* Open and reference count. */
299 atomic_t ref_count;
300 atomic_t open_count;
301
302 /* ccw queue and memory for static ccw/erp buffers. */
303 struct list_head ccw_queue;
304 spinlock_t mem_lock;
305 void *ccw_mem;
306 void *erp_mem;
307 struct list_head ccw_chunks;
308 struct list_head erp_chunks;
309
310 atomic_t tasklet_scheduled;
311 struct tasklet_struct tasklet;
312 struct work_struct kick_work;
313 struct timer_list timer;
314
315 debug_info_t *debug_area;
316
317 struct ccw_device *cdev;
318
319#ifdef CONFIG_DASD_PROFILE
320 struct dasd_profile_info_t profile;
321#endif
322};
323
324/* reasons why device (ccw_device_start) was stopped */
325#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
326#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
327#define DASD_STOPPED_PENDING 4 /* long busy */
328#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
329#define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */
330
331/* per device flags */
332#define DASD_FLAG_RO 0 /* device is read-only */
333#define DASD_FLAG_USE_DIAG 1 /* use diag disciplnie */
334#define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */
335#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
336
337void dasd_put_device_wake(struct dasd_device *);
338
339/*
340 * Reference count inliners
341 */
/* Take a reference on the device; paired with dasd_put_device below. */
static inline void
dasd_get_device(struct dasd_device *device)
{
	atomic_inc(&device->ref_count);
}
347
/*
 * Drop a reference on the device; the final put calls
 * dasd_put_device_wake (defined in dasd.c) to notify waiters.
 */
static inline void
dasd_put_device(struct dasd_device *device)
{
	if (atomic_dec_return(&device->ref_count) == 0)
		dasd_put_device_wake(device);
}
354
355/*
356 * The static memory in ccw_mem and erp_mem is managed by a sorted
357 * list of free memory chunks.
358 */
359struct dasd_mchunk
360{
361 struct list_head list;
362 unsigned long size;
363} __attribute__ ((aligned(8)));
364
365static inline void
366dasd_init_chunklist(struct list_head *chunk_list, void *mem,
367 unsigned long size)
368{
369 struct dasd_mchunk *chunk;
370
371 INIT_LIST_HEAD(chunk_list);
372 chunk = (struct dasd_mchunk *) mem;
373 chunk->size = size - sizeof(struct dasd_mchunk);
374 list_add(&chunk->list, chunk_list);
375}
376
/*
 * Carve an allocation of at least 'size' bytes (rounded up to a
 * multiple of 8) out of the first free chunk that is big enough.
 * If the chosen chunk can be split, the allocation is taken from its
 * tail and the shrunken head stays on the free list; otherwise the
 * whole chunk is removed.  Returns a pointer just behind the chunk
 * header, or NULL if no chunk fits.
 */
static inline void *
dasd_alloc_chunk(struct list_head *chunk_list, unsigned long size)
{
	struct dasd_mchunk *chunk, *tmp;

	/* Round up to the 8 byte alignment of struct dasd_mchunk. */
	size = (size + 7L) & -8L;
	list_for_each_entry(chunk, chunk_list, list) {
		if (chunk->size < size)
			continue;
		if (chunk->size > size + sizeof(struct dasd_mchunk)) {
			/* Split: new header sits just before the tail. */
			char *endaddr = (char *) (chunk + 1) + chunk->size;
			tmp = (struct dasd_mchunk *) (endaddr - size) - 1;
			tmp->size = size;
			chunk->size -= size + sizeof(struct dasd_mchunk);
			chunk = tmp;
		} else
			list_del(&chunk->list);
		return (void *) (chunk + 1);
	}
	return NULL;
}
398
/*
 * Return a chunk obtained from dasd_alloc_chunk to the address-sorted
 * free list, coalescing with the adjacent free chunks where possible.
 */
static inline void
dasd_free_chunk(struct list_head *chunk_list, void *mem)
{
	struct dasd_mchunk *chunk, *tmp;
	struct list_head *p, *left;

	/* Step back from the data pointer to the chunk header. */
	chunk = (struct dasd_mchunk *)
		((char *) mem - sizeof(struct dasd_mchunk));
	/* Find out the left neighbour in chunk_list. */
	left = chunk_list;
	list_for_each(p, chunk_list) {
		if (list_entry(p, struct dasd_mchunk, list) > chunk)
			break;
		left = p;
	}
	/* Try to merge with right neighbour = next element from left. */
	if (left->next != chunk_list) {
		tmp = list_entry(left->next, struct dasd_mchunk, list);
		if ((char *) (chunk + 1) + chunk->size == (char *) tmp) {
			list_del(&tmp->list);
			chunk->size += tmp->size + sizeof(struct dasd_mchunk);
		}
	}
	/* Try to merge with left neighbour. */
	if (left != chunk_list) {
		tmp = list_entry(left, struct dasd_mchunk, list);
		if ((char *) (tmp + 1) + tmp->size == (char *) chunk) {
			tmp->size += chunk->size + sizeof(struct dasd_mchunk);
			/* Absorbed into the left chunk; nothing to insert. */
			return;
		}
	}
	/* No left merge possible: insert the chunk right after 'left'. */
	__list_add(&chunk->list, left, left->next);
}
432
/*
 * Check if bsize is in { 512, 1024, 2048, 4096 }.
 * Returns 0 for a supported block size, -EMEDIUMTYPE otherwise.
 */
static inline int
dasd_check_blocksize(int bsize)
{
	switch (bsize) {
	case 512:
	case 1024:
	case 2048:
	case 4096:
		return 0;
	default:
		return -EMEDIUMTYPE;
	}
}
443
444/* externals in dasd.c */
445#define DASD_PROFILE_ON 1
446#define DASD_PROFILE_OFF 0
447
448extern debug_info_t *dasd_debug_area;
449extern struct dasd_profile_info_t dasd_global_profile;
450extern unsigned int dasd_profile_level;
451extern struct block_device_operations dasd_device_operations;
452
453extern kmem_cache_t *dasd_page_cache;
454
455struct dasd_ccw_req *
456dasd_kmalloc_request(char *, int, int, struct dasd_device *);
457struct dasd_ccw_req *
458dasd_smalloc_request(char *, int, int, struct dasd_device *);
459void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
460void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
461
/*
 * Set the channel data address of a ccw to the kmalloc'ed buffer 'cda'
 * by delegating to set_normalized_cda (asm/idals.h).
 * NOTE(review): 'device' is unused here, presumably kept so the
 * signature matches a smalloc counterpart - confirm against dasd.c.
 */
static inline int
dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
{
	return set_normalized_cda(ccw, cda);
}
467
468struct dasd_device *dasd_alloc_device(void);
469void dasd_free_device(struct dasd_device *);
470
471void dasd_enable_device(struct dasd_device *);
472void dasd_set_target_state(struct dasd_device *, int);
473void dasd_kick_device(struct dasd_device *);
474
475void dasd_add_request_head(struct dasd_ccw_req *);
476void dasd_add_request_tail(struct dasd_ccw_req *);
477int dasd_start_IO(struct dasd_ccw_req *);
478int dasd_term_IO(struct dasd_ccw_req *);
479void dasd_schedule_bh(struct dasd_device *);
480int dasd_sleep_on(struct dasd_ccw_req *);
481int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
482int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
483void dasd_set_timer(struct dasd_device *, int);
484void dasd_clear_timer(struct dasd_device *);
485int dasd_cancel_req(struct dasd_ccw_req *);
486int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
487void dasd_generic_remove (struct ccw_device *cdev);
488int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
489int dasd_generic_set_offline (struct ccw_device *cdev);
490int dasd_generic_notify(struct ccw_device *, int);
491void dasd_generic_auto_online (struct ccw_driver *);
492
493/* externals in dasd_devmap.c */
494extern int dasd_max_devindex;
495extern int dasd_probeonly;
496extern int dasd_autodetect;
497
498int dasd_devmap_init(void);
499void dasd_devmap_exit(void);
500
501struct dasd_device *dasd_create_device(struct ccw_device *);
502void dasd_delete_device(struct dasd_device *);
503
504int dasd_add_sysfs_files(struct ccw_device *);
505void dasd_remove_sysfs_files(struct ccw_device *);
506
507struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
508struct dasd_device *dasd_device_from_devindex(int);
509
510int dasd_parse(void);
511int dasd_busid_known(char *);
512
513/* externals in dasd_gendisk.c */
514int dasd_gendisk_init(void);
515void dasd_gendisk_exit(void);
516int dasd_gendisk_alloc(struct dasd_device *);
517void dasd_gendisk_free(struct dasd_device *);
518int dasd_scan_partitions(struct dasd_device *);
519void dasd_destroy_partitions(struct dasd_device *);
520
521/* externals in dasd_ioctl.c */
522int dasd_ioctl_init(void);
523void dasd_ioctl_exit(void);
524int dasd_ioctl_no_register(struct module *, int, dasd_ioctl_fn_t);
525int dasd_ioctl_no_unregister(struct module *, int, dasd_ioctl_fn_t);
526int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
527
528/* externals in dasd_proc.c */
529int dasd_proc_init(void);
530void dasd_proc_exit(void);
531
532/* externals in dasd_erp.c */
533struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
534struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
535struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
536 struct dasd_device *);
537void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
538void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
539void dasd_log_ccw(struct dasd_ccw_req *, int, __u32);
540
541/* externals in dasd_3370_erp.c */
542dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
543
544/* externals in dasd_3990_erp.c */
545dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
546struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
547
548/* externals in dasd_9336_erp.c */
549dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
550
551/* externals in dasd_9336_erp.c */
552dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
553struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
554
555#endif /* __KERNEL__ */
556
557#endif /* DASD_H */
558
559/*
560 * Overrides for Emacs so that we follow Linus's tabbing style.
561 * Emacs will notice this stuff at the end of the file and automatically
562 * adjust the settings for this buffer only. This must remain at the end
563 * of the file.
564 * ---------------------------------------------------------------------------
565 * Local variables:
566 * c-indent-level: 4
567 * c-brace-imaginary-offset: 0
568 * c-brace-offset: -4
569 * c-argdecl-indent: 4
570 * c-label-offset: -4
571 * c-continued-statement-offset: 4
572 * c-continued-brace-offset: 0
573 * indent-tabs-mode: 1
574 * tab-width: 8
575 * End:
576 */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
new file mode 100644
index 000000000000..f1892baa3b18
--- /dev/null
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -0,0 +1,554 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_ioctl.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
9 *
10 * i/o controls for the dasd driver.
11 */
12#include <linux/config.h>
13#include <linux/interrupt.h>
14#include <linux/major.h>
15#include <linux/fs.h>
16#include <linux/blkpg.h>
17
18#include <asm/ccwdev.h>
19#include <asm/uaccess.h>
20
21/* This is ugly... */
22#define PRINTK_HEADER "dasd_ioctl:"
23
24#include "dasd_int.h"
25
26/*
27 * SECTION: ioctl functions.
28 */
29static struct list_head dasd_ioctl_list = LIST_HEAD_INIT(dasd_ioctl_list);
30
31/*
32 * Find the ioctl with number no.
33 */
34static struct dasd_ioctl *
35dasd_find_ioctl(int no)
36{
37 struct dasd_ioctl *ioctl;
38
39 list_for_each_entry (ioctl, &dasd_ioctl_list, list)
40 if (ioctl->no == no)
41 return ioctl;
42 return NULL;
43}
44
45/*
46 * Register ioctl with number no.
47 */
48int
49dasd_ioctl_no_register(struct module *owner, int no, dasd_ioctl_fn_t handler)
50{
51 struct dasd_ioctl *new;
52 if (dasd_find_ioctl(no))
53 return -EBUSY;
54 new = kmalloc(sizeof (struct dasd_ioctl), GFP_KERNEL);
55 if (new == NULL)
56 return -ENOMEM;
57 new->owner = owner;
58 new->no = no;
59 new->handler = handler;
60 list_add(&new->list, &dasd_ioctl_list);
61 return 0;
62}
63
64/*
65 * Deregister ioctl with number no.
66 */
67int
68dasd_ioctl_no_unregister(struct module *owner, int no, dasd_ioctl_fn_t handler)
69{
70 struct dasd_ioctl *old = dasd_find_ioctl(no);
71 if (old == NULL)
72 return -ENOENT;
73 if (old->no != no || old->handler != handler || owner != old->owner)
74 return -EINVAL;
75 list_del(&old->list);
76 kfree(old);
77 return 0;
78}
79
/*
 * Block device ioctl entry point for the dasd driver.  Dispatches to
 * the handler registered for 'no' via dasd_ioctl_no_register; returns
 * -EINVAL for an unknown ioctl number or a missing data pointer.
 */
int
dasd_ioctl(struct inode *inp, struct file *filp,
	   unsigned int no, unsigned long data)
{
	struct block_device *bdev = inp->i_bdev;
	struct dasd_device *device = bdev->bd_disk->private_data;
	struct dasd_ioctl *ioctl;
	const char *dir;
	int rc;

	/* ioctls that transfer data need a non-NULL user pointer. */
	if ((_IOC_DIR(no) != _IOC_NONE) && (data == 0)) {
		PRINT_DEBUG("empty data ptr");
		return -EINVAL;
	}
	/* Decode the transfer direction only for the debug traces below. */
	dir = _IOC_DIR (no) == _IOC_NONE ? "0" :
		_IOC_DIR (no) == _IOC_READ ? "r" :
		_IOC_DIR (no) == _IOC_WRITE ? "w" :
		_IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u";
	DBF_DEV_EVENT(DBF_DEBUG, device,
		      "ioctl 0x%08x %s'0x%x'%d(%d) with data %8lx", no,
		      dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
	/* Search for ioctl no in the ioctl list. */
	list_for_each_entry(ioctl, &dasd_ioctl_list, list) {
		if (ioctl->no == no) {
			/* Found a matching ioctl. Call it. */
			/* Skip the entry if its module is going away. */
			if (!try_module_get(ioctl->owner))
				continue;
			rc = ioctl->handler(bdev, no, data);
			module_put(ioctl->owner);
			return rc;
		}
	}
	/* No ioctl with number no. */
	DBF_DEV_EVENT(DBF_INFO, device,
		      "unknown ioctl 0x%08x=%s'0x%x'%d(%d) data %8lx", no,
		      dir, _IOC_TYPE(no), _IOC_NR(no), _IOC_SIZE(no), data);
	return -EINVAL;
}
118
/* DASDAPIVER: report the dasd ioctl API version to user space. */
static int
dasd_ioctl_api_version(struct block_device *bdev, int no, long args)
{
	int ver = DASD_API_VERSION;
	return put_user(ver, (int __user *) args);
}
125
/*
 * Enable device.
 * used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
 */
static int
dasd_ioctl_enable(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;
	dasd_enable_device(device);
	/* Formatting the dasd device can change the capacity. */
	/* Capacity is counted in 512-byte sectors, i_size in bytes. */
	down(&bdev->bd_sem);
	i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9);
	up(&bdev->bd_sem);
	return 0;
}
147
/*
 * Disable device.
 * Used by dasdfmt. Disable I/O operations but allow ioctls.
 */
static int
dasd_ioctl_disable(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;
	/*
	 * Man this is sick. We don't do a real disable but only downgrade
	 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
	 * BIODASDDISABLE to disable accesses to the device via the block
	 * device layer but it still wants to do i/o on the device by
	 * using the BIODASDFMT ioctl. Therefore the correct state for the
	 * device is DASD_STATE_BASIC that allows to do basic i/o.
	 */
	dasd_set_target_state(device, DASD_STATE_BASIC);
	/*
	 * Set i_size to zero, since read, write, etc. check against this
	 * value.
	 */
	down(&bdev->bd_sem);
	i_size_write(bdev->bd_inode, 0);
	up(&bdev->bd_sem);
	return 0;
}
180
181/*
182 * Quiesce device.
183 */
184static int
185dasd_ioctl_quiesce(struct block_device *bdev, int no, long args)
186{
187 struct dasd_device *device;
188 unsigned long flags;
189
190 if (!capable (CAP_SYS_ADMIN))
191 return -EACCES;
192
193 device = bdev->bd_disk->private_data;
194 if (device == NULL)
195 return -ENODEV;
196
197 DEV_MESSAGE (KERN_DEBUG, device, "%s",
198 "Quiesce IO on device");
199 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
200 device->stopped |= DASD_STOPPED_QUIESCE;
201 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
202 return 0;
203}
204
205
/*
 * Resume device: counterpart of BIODASDQUIESCE.  Clears the quiesce
 * flag and schedules the device bottom half so queued requests get
 * processed again.  (The original header comment said "Quiesce
 * device" - copy/paste slip.)
 */
static int
dasd_ioctl_resume(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;
	unsigned long flags;

	if (!capable (CAP_SYS_ADMIN))
		return -EACCES;

	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;

	DEV_MESSAGE (KERN_DEBUG, device, "%s",
		     "resume IO on device");

	/* The stopped bits are protected by the ccw device lock. */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	device->stopped &= ~DASD_STOPPED_QUIESCE;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	dasd_schedule_bh (device);
	return 0;
}
232
/*
 * performs formatting of _device_ according to _fdata_
 * Note: The discipline's format_function is assumed to deliver formatting
 * commands to format a single unit of the device. In terms of the ECKD
 * devices this means CCWs are generated to format a single track.
 */
static int
dasd_format(struct dasd_device * device, struct format_data_t * fdata)
{
	struct dasd_ccw_req *cqr;
	int rc;

	/* The discipline must support low-level formatting at all. */
	if (device->discipline->format_device == NULL)
		return -EPERM;

	/* dasdfmt must have issued BIODASDDISABLE first (-> BASIC state). */
	if (device->state != DASD_STATE_BASIC) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "dasd_format: device is not disabled! ");
		return -EBUSY;
	}

	DBF_DEV_EVENT(DBF_NOTICE, device,
		      "formatting units %d to %d (%d B blocks) flags %d",
		      fdata->start_unit,
		      fdata->stop_unit, fdata->blksize, fdata->intensity);

	/* Since dasdfmt keeps the device open after it was disabled,
	 * there still exists an inode for this device.
	 * We must update i_blkbits, otherwise we might get errors when
	 * enabling the device later.
	 */
	if (fdata->start_unit == 0) {
		struct block_device *bdev = bdget_disk(device->gdp, 0);
		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
		bdput(bdev);
	}

	/* Format one unit per iteration, waiting synchronously for each. */
	while (fdata->start_unit <= fdata->stop_unit) {
		cqr = device->discipline->format_device(device, fdata);
		if (IS_ERR(cqr))
			return PTR_ERR(cqr);
		rc = dasd_sleep_on_interruptible(cqr);
		dasd_sfree_request(cqr, cqr->device);
		if (rc) {
			if (rc != -ERESTARTSYS)
				DEV_MESSAGE(KERN_ERR, device,
					    " Formatting of unit %d failed "
					    "with rc = %d",
					    fdata->start_unit, rc);
			return rc;
		}
		fdata->start_unit++;
	}
	return 0;
}
288
289/*
290 * Format device.
291 */
292static int
293dasd_ioctl_format(struct block_device *bdev, int no, long args)
294{
295 struct dasd_device *device;
296 struct format_data_t fdata;
297
298 if (!capable(CAP_SYS_ADMIN))
299 return -EACCES;
300 if (!args)
301 return -EINVAL;
302 /* fdata == NULL is no longer a valid arg to dasd_format ! */
303 device = bdev->bd_disk->private_data;
304
305 if (device == NULL)
306 return -ENODEV;
307 if (test_bit(DASD_FLAG_RO, &device->flags))
308 return -EROFS;
309 if (copy_from_user(&fdata, (void __user *) args,
310 sizeof (struct format_data_t)))
311 return -EFAULT;
312 if (bdev != bdev->bd_contains) {
313 DEV_MESSAGE(KERN_WARNING, device, "%s",
314 "Cannot low-level format a partition");
315 return -EINVAL;
316 }
317 return dasd_format(device, &fdata);
318}
319
#ifdef CONFIG_DASD_PROFILE
/*
 * Reset device profile information
 */
static int
dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;

	memset(&device->profile, 0, sizeof (struct dasd_profile_info_t));
	return 0;
}

/*
 * Return device profile information
 */
static int
dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;

	if (copy_to_user((long __user *) args, (long *) &device->profile,
			 sizeof (struct dasd_profile_info_t)))
		return -EFAULT;
	return 0;
}
#else
/* Profiling compiled out: both ioctls report "not implemented". */
static int
dasd_ioctl_reset_profile(struct block_device *bdev, int no, long args)
{
	return -ENOSYS;
}

static int
dasd_ioctl_read_profile(struct block_device *bdev, int no, long args)
{
	return -ENOSYS;
}
#endif
370
/*
 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
 * The discipline fills in the format-specific part; the generic
 * fields (device numbers, counters, queue lengths) are added here.
 */
static int
dasd_ioctl_information(struct block_device *bdev, int no, long args)
{
	struct dasd_device *device;
	struct dasd_information2_t *dasd_info;
	unsigned long flags;
	int rc;
	struct ccw_device *cdev;

	device = bdev->bd_disk->private_data;
	if (device == NULL)
		return -ENODEV;

	if (!device->discipline->fill_info)
		return -EINVAL;

	dasd_info = kmalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
	if (dasd_info == NULL)
		return -ENOMEM;

	rc = device->discipline->fill_info(device, dasd_info);
	if (rc) {
		kfree(dasd_info);
		return rc;
	}

	cdev = device->cdev;

	dasd_info->devno = _ccw_device_get_device_number(device->cdev);
	dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
	dasd_info->cu_type = cdev->id.cu_type;
	dasd_info->cu_model = cdev->id.cu_model;
	dasd_info->dev_type = cdev->id.dev_type;
	dasd_info->dev_model = cdev->id.dev_model;
	dasd_info->open_count = atomic_read(&device->open_count);
	dasd_info->status = device->state;

	/*
	 * check if device is really formatted
	 * LDL / CDL was returned by 'fill_info'
	 */
	if ((device->state < DASD_STATE_READY) ||
	    (dasd_check_blocksize(device->bp_block)))
		dasd_info->format = DASD_FORMAT_NONE;

	dasd_info->features |= test_bit(DASD_FLAG_RO, &device->flags) ?
		DASD_FEATURE_READONLY : DASD_FEATURE_DEFAULT;

	if (device->discipline)
		memcpy(dasd_info->type, device->discipline->name, 4);
	else
		memcpy(dasd_info->type, "none", 4);
	dasd_info->req_queue_len = 0;
	dasd_info->chanq_len = 0;
	if (device->request_queue->request_fn) {
		struct list_head *l;
#ifdef DASD_EXTENDED_PROFILING
		{
			/* NOTE(review): this inner 'l' shadows the outer
			 * one - harmless but confusing. */
			struct list_head *l;
			spin_lock_irqsave(&device->lock, flags);
			list_for_each(l, &device->request_queue->queue_head)
				dasd_info->req_queue_len++;
			spin_unlock_irqrestore(&device->lock, flags);
		}
#endif /* DASD_EXTENDED_PROFILING */
		/* Count queued ccw requests under the ccw device lock. */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		list_for_each(l, &device->ccw_queue)
			dasd_info->chanq_len++;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
				       flags);
	}

	rc = 0;
	/* BIODASDINFO copies only the shorter dasd_information_t prefix. */
	if (copy_to_user((long __user *) args, (long *) dasd_info,
			 ((no == (unsigned int) BIODASDINFO2) ?
			  sizeof (struct dasd_information2_t) :
			  sizeof (struct dasd_information_t))))
		rc = -EFAULT;
	kfree(dasd_info);
	return rc;
}
455
456/*
457 * Set read only
458 */
459static int
460dasd_ioctl_set_ro(struct block_device *bdev, int no, long args)
461{
462 struct dasd_device *device;
463 int intval;
464
465 if (!capable(CAP_SYS_ADMIN))
466 return -EACCES;
467 if (bdev != bdev->bd_contains)
468 // ro setting is not allowed for partitions
469 return -EINVAL;
470 if (get_user(intval, (int __user *) args))
471 return -EFAULT;
472 device = bdev->bd_disk->private_data;
473 if (device == NULL)
474 return -ENODEV;
475 set_disk_ro(bdev->bd_disk, intval);
476 if (intval)
477 set_bit(DASD_FLAG_RO, &device->flags);
478 else
479 clear_bit(DASD_FLAG_RO, &device->flags);
480 return 0;
481}
482
483/*
484 * Return disk geometry.
485 */
486static int
487dasd_ioctl_getgeo(struct block_device *bdev, int no, long args)
488{
489 struct hd_geometry geo = { 0, };
490 struct dasd_device *device;
491
492 device = bdev->bd_disk->private_data;
493 if (device == NULL)
494 return -ENODEV;
495
496 if (device == NULL || device->discipline == NULL ||
497 device->discipline->fill_geometry == NULL)
498 return -EINVAL;
499
500 geo = (struct hd_geometry) {};
501 device->discipline->fill_geometry(device, &geo);
502 geo.start = get_start_sect(bdev) >> device->s2b_shift;
503 if (copy_to_user((struct hd_geometry __user *) args, &geo,
504 sizeof (struct hd_geometry)))
505 return -EFAULT;
506
507 return 0;
508}
509
/*
 * List of static ioctls, registered by dasd_ioctl_init and removed by
 * dasd_ioctl_exit.  The table is terminated by the { -1, NULL } entry.
 */
static struct { int no; dasd_ioctl_fn_t fn; } dasd_ioctls[] =
{
	{ BIODASDDISABLE, dasd_ioctl_disable },
	{ BIODASDENABLE, dasd_ioctl_enable },
	{ BIODASDQUIESCE, dasd_ioctl_quiesce },
	{ BIODASDRESUME, dasd_ioctl_resume },
	{ BIODASDFMT, dasd_ioctl_format },
	{ BIODASDINFO, dasd_ioctl_information },
	{ BIODASDINFO2, dasd_ioctl_information },
	{ BIODASDPRRD, dasd_ioctl_read_profile },
	{ BIODASDPRRST, dasd_ioctl_reset_profile },
	{ BLKROSET, dasd_ioctl_set_ro },
	{ DASDAPIVER, dasd_ioctl_api_version },
	{ HDIO_GETGEO, dasd_ioctl_getgeo },
	{ -1, NULL }
};
529
530int
531dasd_ioctl_init(void)
532{
533 int i;
534
535 for (i = 0; dasd_ioctls[i].no != -1; i++)
536 dasd_ioctl_no_register(NULL, dasd_ioctls[i].no,
537 dasd_ioctls[i].fn);
538 return 0;
539
540}
541
542void
543dasd_ioctl_exit(void)
544{
545 int i;
546
547 for (i = 0; dasd_ioctls[i].no != -1; i++)
548 dasd_ioctl_no_unregister(NULL, dasd_ioctls[i].no,
549 dasd_ioctls[i].fn);
550
551}
552
553EXPORT_SYMBOL(dasd_ioctl_no_register);
554EXPORT_SYMBOL(dasd_ioctl_no_unregister);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 000000000000..353d41118c62
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,319 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_proc.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
9 *
10 * /proc interface for the dasd driver.
11 *
12 * $Revision: 1.30 $
13 */
14
15#include <linux/config.h>
16#include <linux/ctype.h>
17#include <linux/seq_file.h>
18#include <linux/vmalloc.h>
19
20#include <asm/debug.h>
21#include <asm/uaccess.h>
22
23/* This is ugly... */
24#define PRINTK_HEADER "dasd_proc:"
25
26#include "dasd_int.h"
27
/* /proc/dasd directory and its two entries, created in dasd_proc_init(). */
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
static struct proc_dir_entry *dasd_devices_entry = NULL;
static struct proc_dir_entry *dasd_statistics_entry = NULL;
31
32static inline char *
33dasd_get_user_string(const char __user *user_buf, size_t user_len)
34{
35 char *buffer;
36
37 buffer = kmalloc(user_len + 1, GFP_KERNEL);
38 if (buffer == NULL)
39 return ERR_PTR(-ENOMEM);
40 if (copy_from_user(buffer, user_buf, user_len) != 0) {
41 kfree(buffer);
42 return ERR_PTR(-EFAULT);
43 }
44 /* got the string, now strip linefeed. */
45 if (buffer[user_len - 1] == '\n')
46 buffer[user_len - 1] = 0;
47 else
48 buffer[user_len] = 0;
49 return buffer;
50}
51
52static int
53dasd_devices_show(struct seq_file *m, void *v)
54{
55 struct dasd_device *device;
56 char *substr;
57
58 device = dasd_device_from_devindex((unsigned long) v - 1);
59 if (IS_ERR(device))
60 return 0;
61 /* Print device number. */
62 seq_printf(m, "%s", device->cdev->dev.bus_id);
63 /* Print discipline string. */
64 if (device != NULL && device->discipline != NULL)
65 seq_printf(m, "(%s)", device->discipline->name);
66 else
67 seq_printf(m, "(none)");
68 /* Print kdev. */
69 if (device->gdp)
70 seq_printf(m, " at (%3d:%6d)",
71 device->gdp->major, device->gdp->first_minor);
72 else
73 seq_printf(m, " at (???:??????)");
74 /* Print device name. */
75 if (device->gdp)
76 seq_printf(m, " is %-8s", device->gdp->disk_name);
77 else
78 seq_printf(m, " is ????????");
79 /* Print devices features. */
80 substr = test_bit(DASD_FLAG_RO, &device->flags) ? "(ro)" : " ";
81 seq_printf(m, "%4s: ", substr);
82 /* Print device status information. */
83 switch ((device != NULL) ? device->state : -1) {
84 case -1:
85 seq_printf(m, "unknown");
86 break;
87 case DASD_STATE_NEW:
88 seq_printf(m, "new");
89 break;
90 case DASD_STATE_KNOWN:
91 seq_printf(m, "detected");
92 break;
93 case DASD_STATE_BASIC:
94 seq_printf(m, "basic");
95 break;
96 case DASD_STATE_READY:
97 case DASD_STATE_ONLINE:
98 seq_printf(m, "active ");
99 if (dasd_check_blocksize(device->bp_block))
100 seq_printf(m, "n/f ");
101 else
102 seq_printf(m,
103 "at blocksize: %d, %ld blocks, %ld MB",
104 device->bp_block, device->blocks,
105 ((device->bp_block >> 9) *
106 device->blocks) >> 11);
107 break;
108 default:
109 seq_printf(m, "no stat");
110 break;
111 }
112 dasd_put_device(device);
113 if (dasd_probeonly)
114 seq_printf(m, "(probeonly)");
115 seq_printf(m, "\n");
116 return 0;
117}
118
119static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
120{
121 if (*pos >= dasd_max_devindex)
122 return NULL;
123 return (void *)((unsigned long) *pos + 1);
124}
125
126static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
127{
128 ++*pos;
129 return dasd_devices_start(m, pos);
130}
131
/* seq_file stop callback - no per-iteration state to release. */
static void dasd_devices_stop(struct seq_file *m, void *v)
{
}
135
/* seq_file iterator over all known dasd devices. */
static struct seq_operations dasd_devices_seq_ops = {
	.start		= dasd_devices_start,
	.next		= dasd_devices_next,
	.stop		= dasd_devices_stop,
	.show		= dasd_devices_show,
};
142
/* open() for /proc/dasd/devices: attach the device iterator. */
static int dasd_devices_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dasd_devices_seq_ops);
}
147
/* File operations for /proc/dasd/devices, backed by seq_file. */
static struct file_operations dasd_devices_file_ops = {
	.open		= dasd_devices_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
154
/*
 * Clamp a proc read result of len bytes to the window requested by
 * (off, count): sets *start to the data offset inside page, flags
 * *eof when the remaining data does not fill the window, and
 * returns the number of bytes to hand back.
 */
static inline int
dasd_calc_metrics(char *page, char **start, off_t off,
		  int count, int *eof, int len)
{
	int remaining;

	remaining = 0;
	if (len > off)
		remaining = len - off;
	if (remaining > count)
		remaining = count;
	if (remaining < count)
		*eof = 1;
	*start = page + off;
	return remaining;
}
167
/*
 * Emit the 32 histogram buckets of array, each scaled down by
 * shift, as two rows of 16 right-aligned columns.  Returns the
 * advanced output pointer.
 */
static inline char *
dasd_statistics_array(char *str, int *array, int shift)
{
	int row, col;

	for (row = 0; row < 2; row++) {
		for (col = 0; col < 16; col++)
			str += sprintf(str, "%7d ",
				       array[row * 16 + col] >> shift);
		str += sprintf(str, "\n");
	}
	return str;
}
181
/*
 * read_proc handler for /proc/dasd/statistics: dump the global I/O
 * profiling counters, or a hint how to enable them.  Only available
 * when the kernel is built with CONFIG_DASD_PROFILE.
 */
static int
dasd_statistics_read(char *page, char **start, off_t off,
		     int count, int *eof, void *data)
{
	unsigned long len;
#ifdef CONFIG_DASD_PROFILE
	struct dasd_profile_info_t *prof;
	char *str;
	int shift;

	/* check for active profiling */
	if (dasd_profile_level == DASD_PROFILE_OFF) {
		len = sprintf(page, "Statistics are off - they might be "
			      "switched on using 'echo set on > "
			      "/proc/dasd/statistics'\n");
		return dasd_calc_metrics(page, start, off, count, eof, len);
	}

	prof = &dasd_global_profile;
	/* prevent counter 'overflow' on output */
	for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);

	str = page;
	str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
	str += sprintf(str, "with %d sectors(512B each)\n",
		       prof->dasd_io_sects);
	/* Column headers for the two rows of each 32-bucket histogram. */
	str += sprintf(str,
		       "   __<4    ___8    __16    __32    __64    _128 "
		       "   _256    _512    __1k    __2k    __4k    __8k "
		       "   _16k    _32k    _64k    128k\n");
	str += sprintf(str,
		       "   _256    _512    __1M    __2M    __4M    __8M "
		       "   _16M    _32M    _64M    128M    256M    512M "
		       "   __1G    __2G    __4G " "   _>4G\n");

	str += sprintf(str, "Histogram of sizes (512B secs)\n");
	str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
	str += sprintf(str, "Histogram of I/O times (microseconds)\n");
	str = dasd_statistics_array(str, prof->dasd_io_times, shift);
	str += sprintf(str, "Histogram of I/O times per sector\n");
	str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
	str += sprintf(str, "Histogram of I/O time till ssch\n");
	str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
	str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
	str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
	str += sprintf(str, "Histogram of I/O time between ssch "
		       "and irq per sector\n");
	str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
	str += sprintf(str, "Histogram of I/O time between irq and end\n");
	str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
	str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
	str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
	len = str - page;
#else
	len = sprintf(page, "Statistics are not activated in this kernel\n");
#endif
	return dasd_calc_metrics(page, start, off, count, eof, len);
}
240
241static int
242dasd_statistics_write(struct file *file, const char __user *user_buf,
243 unsigned long user_len, void *data)
244{
245#ifdef CONFIG_DASD_PROFILE
246 char *buffer, *str;
247
248 if (user_len > 65536)
249 user_len = 65536;
250 buffer = dasd_get_user_string(user_buf, user_len);
251 if (IS_ERR(buffer))
252 return PTR_ERR(buffer);
253 MESSAGE_LOG(KERN_INFO, "/proc/dasd/statictics: '%s'", buffer);
254
255 /* check for valid verbs */
256 for (str = buffer; isspace(*str); str++);
257 if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
258 /* 'set xxx' was given */
259 for (str = str + 4; isspace(*str); str++);
260 if (strcmp(str, "on") == 0) {
261 /* switch on statistics profiling */
262 dasd_profile_level = DASD_PROFILE_ON;
263 MESSAGE(KERN_INFO, "%s", "Statistics switched on");
264 } else if (strcmp(str, "off") == 0) {
265 /* switch off and reset statistics profiling */
266 memset(&dasd_global_profile,
267 0, sizeof (struct dasd_profile_info_t));
268 dasd_profile_level = DASD_PROFILE_OFF;
269 MESSAGE(KERN_INFO, "%s", "Statistics switched off");
270 } else
271 goto out_error;
272 } else if (strncmp(str, "reset", 5) == 0) {
273 /* reset the statistics */
274 memset(&dasd_global_profile, 0,
275 sizeof (struct dasd_profile_info_t));
276 MESSAGE(KERN_INFO, "%s", "Statistics reset");
277 } else
278 goto out_error;
279 kfree(buffer);
280 return user_len;
281out_error:
282 MESSAGE(KERN_WARNING, "%s",
283 "/proc/dasd/statistics: only 'set on', 'set off' "
284 "and 'reset' are supported verbs");
285 kfree(buffer);
286 return -EINVAL;
287#else
288 MESSAGE(KERN_WARNING, "%s",
289 "/proc/dasd/statistics: is not activated in this kernel");
290 return user_len;
291#endif /* CONFIG_DASD_PROFILE */
292}
293
294int
295dasd_proc_init(void)
296{
297 dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
298 dasd_proc_root_entry->owner = THIS_MODULE;
299 dasd_devices_entry = create_proc_entry("devices",
300 S_IFREG | S_IRUGO | S_IWUSR,
301 dasd_proc_root_entry);
302 dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
303 dasd_devices_entry->owner = THIS_MODULE;
304 dasd_statistics_entry = create_proc_entry("statistics",
305 S_IFREG | S_IRUGO | S_IWUSR,
306 dasd_proc_root_entry);
307 dasd_statistics_entry->read_proc = dasd_statistics_read;
308 dasd_statistics_entry->write_proc = dasd_statistics_write;
309 dasd_statistics_entry->owner = THIS_MODULE;
310 return 0;
311}
312
313void
314dasd_proc_exit(void)
315{
316 remove_proc_entry("devices", dasd_proc_root_entry);
317 remove_proc_entry("statistics", dasd_proc_root_entry);
318 remove_proc_entry("dasd", &proc_root);
319}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
new file mode 100644
index 000000000000..a66b17b65296
--- /dev/null
+++ b/drivers/s390/block/dcssblk.c
@@ -0,0 +1,775 @@
1/*
2 * dcssblk.c -- the S/390 block driver for dcss memory
3 *
4 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
5 */
6
7#include <linux/module.h>
8#include <linux/moduleparam.h>
9#include <linux/ctype.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/slab.h>
13#include <linux/blkdev.h>
14#include <asm/extmem.h>
15#include <asm/io.h>
16#include <linux/completion.h>
17#include <linux/interrupt.h>
18#include <asm/ccwdev.h> // for s390_root_dev_(un)register()
19
20//#define DCSSBLK_DEBUG /* Debug messages on/off */
21#define DCSSBLK_NAME "dcssblk"
22#define DCSSBLK_MINORS_PER_DISK 1
23#define DCSSBLK_PARM_LEN 400
24
25#ifdef DCSSBLK_DEBUG
26#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
27#else
28#define PRINT_DEBUG(x...) do {} while (0)
29#endif
30#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
31#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
32#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
33
34
35static int dcssblk_open(struct inode *inode, struct file *filp);
36static int dcssblk_release(struct inode *inode, struct file *filp);
37static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
38
39static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
40
/* Dynamically assigned block major, set in dcssblk_init(). */
static int dcssblk_major;
/* Only open/release are needed; I/O goes through dcssblk_make_request(). */
static struct block_device_operations dcssblk_devops = {
	.owner   = THIS_MODULE,
	.open    = dcssblk_open,
	.release = dcssblk_release,
};
47
48static ssize_t dcssblk_add_store(struct device * dev, const char * buf,
49 size_t count);
50static ssize_t dcssblk_remove_store(struct device * dev, const char * buf,
51 size_t count);
52static ssize_t dcssblk_save_store(struct device * dev, const char * buf,
53 size_t count);
54static ssize_t dcssblk_save_show(struct device *dev, char *buf);
55static ssize_t dcssblk_shared_store(struct device * dev, const char * buf,
56 size_t count);
57static ssize_t dcssblk_shared_show(struct device *dev, char *buf);
58
59static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
60static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
61static DEVICE_ATTR(save, S_IWUSR | S_IRUGO, dcssblk_save_show,
62 dcssblk_save_store);
63static DEVICE_ATTR(shared, S_IWUSR | S_IRUGO, dcssblk_shared_show,
64 dcssblk_shared_store);
65
66static struct device *dcssblk_root_dev;
67
/* Per-segment state; one instance per loaded DCSS segment. */
struct dcssblk_dev_info {
	struct list_head lh;		/* link in dcssblk_devices list */
	struct device dev;		/* embedded driver-core device */
	char segment_name[BUS_ID_SIZE];	/* upper-case DCSS name */
	atomic_t use_count;		/* open block-device references */
	struct gendisk *gd;
	unsigned long start;		/* segment start, from segment_load() */
	unsigned long end;		/* segment end (inclusive) */
	int segment_type;		/* SEG_TYPE_*, returned by segment_load() */
	unsigned char save_pending;	/* save deferred until device is idle */
	unsigned char is_shared;	/* currently loaded SEGMENT_SHARED? */
	struct request_queue *dcssblk_queue;
};
81
82static struct list_head dcssblk_devices = LIST_HEAD_INIT(dcssblk_devices);
83static struct rw_semaphore dcssblk_devices_sem;
84
85/*
86 * release function for segment device.
87 */
88static void
89dcssblk_release_segment(struct device *dev)
90{
91 PRINT_DEBUG("segment release fn called for %s\n", dev->bus_id);
92 kfree(container_of(dev, struct dcssblk_dev_info, dev));
93 module_put(THIS_MODULE);
94}
95
96/*
97 * get a minor number. needs to be called with
98 * down_write(&dcssblk_devices_sem) and the
99 * device needs to be enqueued before the semaphore is
100 * freed.
101 */
static inline int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
	int minor, found;
	struct dcssblk_dev_info *entry;

	if (dev_info == NULL)
		return -EINVAL;
	/* try every possible minor until one is not taken by any
	 * device already on the list */
	for (minor = 0; minor < (1<<MINORBITS); minor++) {
		found = 0;
		// test if minor available
		list_for_each_entry(entry, &dcssblk_devices, lh)
			if (minor == entry->gd->first_minor)
				found++;
		if (!found) break; // got unused minor
	}
	/* loop fell through without break -> every minor was in use */
	if (found)
		return -EBUSY;
	dev_info->gd->first_minor = minor;
	return 0;
}
123
124/*
125 * get the struct dcssblk_dev_info from dcssblk_devices
126 * for the given name.
127 * down_read(&dcssblk_devices_sem) must be held.
128 */
129static struct dcssblk_dev_info *
130dcssblk_get_device_by_name(char *name)
131{
132 struct dcssblk_dev_info *entry;
133
134 list_for_each_entry(entry, &dcssblk_devices, lh) {
135 if (!strcmp(name, entry->segment_name)) {
136 return entry;
137 }
138 }
139 return NULL;
140}
141
142/*
143 * print appropriate error message for segment_load()/segment_type()
144 * return code
145 */
146static void
147dcssblk_segment_warn(int rc, char* seg_name)
148{
149 switch (rc) {
150 case -ENOENT:
151 PRINT_WARN("cannot load/query segment %s, does not exist\n",
152 seg_name);
153 break;
154 case -ENOSYS:
155 PRINT_WARN("cannot load/query segment %s, not running on VM\n",
156 seg_name);
157 break;
158 case -EIO:
159 PRINT_WARN("cannot load/query segment %s, hardware error\n",
160 seg_name);
161 break;
162 case -ENOTSUPP:
163 PRINT_WARN("cannot load/query segment %s, is a multi-part "
164 "segment\n", seg_name);
165 break;
166 case -ENOSPC:
167 PRINT_WARN("cannot load/query segment %s, overlaps with "
168 "storage\n", seg_name);
169 break;
170 case -EBUSY:
171 PRINT_WARN("cannot load/query segment %s, overlaps with "
172 "already loaded dcss\n", seg_name);
173 break;
174 case -EPERM:
175 PRINT_WARN("cannot load/query segment %s, already loaded in "
176 "incompatible mode\n", seg_name);
177 break;
178 case -ENOMEM:
179 PRINT_WARN("cannot load/query segment %s, out of memory\n",
180 seg_name);
181 break;
182 case -ERANGE:
183 PRINT_WARN("cannot load/query segment %s, exceeds kernel "
184 "mapping range\n", seg_name);
185 break;
186 default:
187 PRINT_WARN("cannot load/query segment %s, return value %i\n",
188 seg_name, rc);
189 break;
190 }
191}
192
193/*
194 * device attribute for switching shared/nonshared (exclusive)
195 * operation (show + store)
196 */
197static ssize_t
198dcssblk_shared_show(struct device *dev, char *buf)
199{
200 struct dcssblk_dev_info *dev_info;
201
202 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
203 return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
204}
205
/*
 * Store handler for the "shared" sysfs attribute: reload the segment
 * in shared ("1") or exclusive ("0") mode.  The segment must not be
 * in use; on -EIO/-ENOENT reload failures the device is torn down.
 */
static ssize_t
dcssblk_shared_store(struct device *dev, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	int rc;

	/* accept only a single character (optionally newline terminated) */
	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
		PRINT_WARN("Invalid value, must be 0 or 1\n");
		return -EINVAL;
	}
	down_write(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	if (atomic_read(&dev_info->use_count)) {
		PRINT_ERR("share: segment %s is busy!\n",
			  dev_info->segment_name);
		rc = -EBUSY;
		goto out;
	}
	if (inbuf[0] == '1') {
		// reload segment in shared mode
		rc = segment_modify_shared(dev_info->segment_name,
					   SEGMENT_SHARED);
		if (rc < 0) {
			BUG_ON(rc == -EINVAL);
			if (rc == -EIO || rc == -ENOENT)
				goto removeseg;
		} else {
			dev_info->is_shared = 1;
			/* these segment types are made read-only when shared */
			switch (dev_info->segment_type) {
				case SEG_TYPE_SR:
				case SEG_TYPE_ER:
				case SEG_TYPE_SC:
					set_disk_ro(dev_info->gd,1);
			}
		}
	} else if (inbuf[0] == '0') {
		// reload segment in exclusive mode
		if (dev_info->segment_type == SEG_TYPE_SC) {
			PRINT_ERR("Segment type SC (%s) cannot be loaded in "
				  "non-shared mode\n", dev_info->segment_name);
			rc = -EINVAL;
			goto out;
		}
		rc = segment_modify_shared(dev_info->segment_name,
					   SEGMENT_EXCLUSIVE);
		if (rc < 0) {
			BUG_ON(rc == -EINVAL);
			if (rc == -EIO || rc == -ENOENT)
				goto removeseg;
		} else {
			dev_info->is_shared = 0;
			set_disk_ro(dev_info->gd, 0);
		}
	} else {
		PRINT_WARN("Invalid value, must be 0 or 1\n");
		rc = -EINVAL;
		goto out;
	}
	rc = count;
	goto out;

	/* the segment is gone; tear the block device down completely */
removeseg:
	PRINT_ERR("Could not reload segment %s, removing it now!\n",
		  dev_info->segment_name);
	list_del(&dev_info->lh);

	del_gendisk(dev_info->gd);
	blk_put_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(dev);
	put_device(dev);
out:
	up_write(&dcssblk_devices_sem);
	return rc;
}
282
283/*
284 * device attribute for save operation on current copy
285 * of the segment. If the segment is busy, saving will
286 * become pending until it gets released, which can be
287 * undone by storing a non-true value to this entry.
288 * (show + store)
289 */
290static ssize_t
291dcssblk_save_show(struct device *dev, char *buf)
292{
293 struct dcssblk_dev_info *dev_info;
294
295 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
296 return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
297}
298
299static ssize_t
300dcssblk_save_store(struct device *dev, const char *inbuf, size_t count)
301{
302 struct dcssblk_dev_info *dev_info;
303
304 if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) {
305 PRINT_WARN("Invalid value, must be 0 or 1\n");
306 return -EINVAL;
307 }
308 dev_info = container_of(dev, struct dcssblk_dev_info, dev);
309
310 down_write(&dcssblk_devices_sem);
311 if (inbuf[0] == '1') {
312 if (atomic_read(&dev_info->use_count) == 0) {
313 // device is idle => we save immediately
314 PRINT_INFO("Saving segment %s\n",
315 dev_info->segment_name);
316 segment_save(dev_info->segment_name);
317 } else {
318 // device is busy => we save it when it becomes
319 // idle in dcssblk_release
320 PRINT_INFO("Segment %s is currently busy, it will "
321 "be saved when it becomes idle...\n",
322 dev_info->segment_name);
323 dev_info->save_pending = 1;
324 }
325 } else if (inbuf[0] == '0') {
326 if (dev_info->save_pending) {
327 // device is busy & the user wants to undo his save
328 // request
329 dev_info->save_pending = 0;
330 PRINT_INFO("Pending save for segment %s deactivated\n",
331 dev_info->segment_name);
332 }
333 } else {
334 up_write(&dcssblk_devices_sem);
335 PRINT_WARN("Invalid value, must be 0 or 1\n");
336 return -EINVAL;
337 }
338 up_write(&dcssblk_devices_sem);
339 return count;
340}
341
342/*
343 * device attribute for adding devices
344 */
345static ssize_t
346dcssblk_add_store(struct device *dev, const char *buf, size_t count)
347{
348 int rc, i;
349 struct dcssblk_dev_info *dev_info;
350 char *local_buf;
351 unsigned long seg_byte_size;
352
353 dev_info = NULL;
354 if (dev != dcssblk_root_dev) {
355 rc = -EINVAL;
356 goto out_nobuf;
357 }
358 local_buf = kmalloc(count + 1, GFP_KERNEL);
359 if (local_buf == NULL) {
360 rc = -ENOMEM;
361 goto out_nobuf;
362 }
363 /*
364 * parse input
365 */
366 for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
367 local_buf[i] = toupper(buf[i]);
368 }
369 local_buf[i] = '\0';
370 if ((i == 0) || (i > 8)) {
371 rc = -ENAMETOOLONG;
372 goto out;
373 }
374 /*
375 * already loaded?
376 */
377 down_read(&dcssblk_devices_sem);
378 dev_info = dcssblk_get_device_by_name(local_buf);
379 up_read(&dcssblk_devices_sem);
380 if (dev_info != NULL) {
381 PRINT_WARN("Segment %s already loaded!\n", local_buf);
382 rc = -EEXIST;
383 goto out;
384 }
385 /*
386 * get a struct dcssblk_dev_info
387 */
388 dev_info = kmalloc(sizeof(struct dcssblk_dev_info), GFP_KERNEL);
389 if (dev_info == NULL) {
390 rc = -ENOMEM;
391 goto out;
392 }
393 memset(dev_info, 0, sizeof(struct dcssblk_dev_info));
394
395 strcpy(dev_info->segment_name, local_buf);
396 strlcpy(dev_info->dev.bus_id, local_buf, BUS_ID_SIZE);
397 dev_info->dev.release = dcssblk_release_segment;
398 INIT_LIST_HEAD(&dev_info->lh);
399
400 dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
401 if (dev_info->gd == NULL) {
402 rc = -ENOMEM;
403 goto free_dev_info;
404 }
405 dev_info->gd->major = dcssblk_major;
406 dev_info->gd->fops = &dcssblk_devops;
407 dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
408 dev_info->gd->queue = dev_info->dcssblk_queue;
409 dev_info->gd->private_data = dev_info;
410 dev_info->gd->driverfs_dev = &dev_info->dev;
411 /*
412 * load the segment
413 */
414 rc = segment_load(local_buf, SEGMENT_SHARED,
415 &dev_info->start, &dev_info->end);
416 if (rc < 0) {
417 dcssblk_segment_warn(rc, dev_info->segment_name);
418 goto dealloc_gendisk;
419 }
420 seg_byte_size = (dev_info->end - dev_info->start + 1);
421 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
422 PRINT_INFO("Loaded segment %s, size = %lu Byte, "
423 "capacity = %lu (512 Byte) sectors\n", local_buf,
424 seg_byte_size, seg_byte_size >> 9);
425
426 dev_info->segment_type = rc;
427 dev_info->save_pending = 0;
428 dev_info->is_shared = 1;
429 dev_info->dev.parent = dcssblk_root_dev;
430
431 /*
432 * get minor, add to list
433 */
434 down_write(&dcssblk_devices_sem);
435 rc = dcssblk_assign_free_minor(dev_info);
436 if (rc) {
437 up_write(&dcssblk_devices_sem);
438 PRINT_ERR("No free minor number available! "
439 "Unloading segment...\n");
440 goto unload_seg;
441 }
442 sprintf(dev_info->gd->disk_name, "dcssblk%d",
443 dev_info->gd->first_minor);
444 list_add_tail(&dev_info->lh, &dcssblk_devices);
445
446 if (!try_module_get(THIS_MODULE)) {
447 rc = -ENODEV;
448 goto list_del;
449 }
450 /*
451 * register the device
452 */
453 rc = device_register(&dev_info->dev);
454 if (rc) {
455 PRINT_ERR("Segment %s could not be registered RC=%d\n",
456 local_buf, rc);
457 module_put(THIS_MODULE);
458 goto list_del;
459 }
460 get_device(&dev_info->dev);
461 rc = device_create_file(&dev_info->dev, &dev_attr_shared);
462 if (rc)
463 goto unregister_dev;
464 rc = device_create_file(&dev_info->dev, &dev_attr_save);
465 if (rc)
466 goto unregister_dev;
467
468 add_disk(dev_info->gd);
469
470 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
471 blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
472
473 switch (dev_info->segment_type) {
474 case SEG_TYPE_SR:
475 case SEG_TYPE_ER:
476 case SEG_TYPE_SC:
477 set_disk_ro(dev_info->gd,1);
478 break;
479 default:
480 set_disk_ro(dev_info->gd,0);
481 break;
482 }
483 PRINT_DEBUG("Segment %s loaded successfully\n", local_buf);
484 up_write(&dcssblk_devices_sem);
485 rc = count;
486 goto out;
487
488unregister_dev:
489 PRINT_ERR("device_create_file() failed!\n");
490 list_del(&dev_info->lh);
491 blk_put_queue(dev_info->dcssblk_queue);
492 dev_info->gd->queue = NULL;
493 put_disk(dev_info->gd);
494 device_unregister(&dev_info->dev);
495 segment_unload(dev_info->segment_name);
496 put_device(&dev_info->dev);
497 up_write(&dcssblk_devices_sem);
498 goto out;
499list_del:
500 list_del(&dev_info->lh);
501 up_write(&dcssblk_devices_sem);
502unload_seg:
503 segment_unload(local_buf);
504dealloc_gendisk:
505 blk_put_queue(dev_info->dcssblk_queue);
506 dev_info->gd->queue = NULL;
507 put_disk(dev_info->gd);
508free_dev_info:
509 kfree(dev_info);
510out:
511 kfree(local_buf);
512out_nobuf:
513 return rc;
514}
515
516/*
517 * device attribute for removing devices
518 */
/*
 * Store handler for the root device's "remove" sysfs attribute:
 * unload the named segment and tear down its block device, unless
 * it is still open.
 */
static ssize_t
dcssblk_remove_store(struct device *dev, const char *buf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	int rc, i;
	char *local_buf;

	/* only the root device carries the "remove" attribute */
	if (dev != dcssblk_root_dev) {
		return -EINVAL;
	}
	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		return -ENOMEM;
	}
	/*
	 * parse input
	 */
	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
		local_buf[i] = toupper(buf[i]);
	}
	local_buf[i] = '\0';
	/* segment names are 1 to 8 characters */
	if ((i == 0) || (i > 8)) {
		rc = -ENAMETOOLONG;
		goto out_buf;
	}

	down_write(&dcssblk_devices_sem);
	dev_info = dcssblk_get_device_by_name(local_buf);
	if (dev_info == NULL) {
		up_write(&dcssblk_devices_sem);
		PRINT_WARN("Segment %s is not loaded!\n", local_buf);
		rc = -ENODEV;
		goto out_buf;
	}
	/* refuse while the block device is still open */
	if (atomic_read(&dev_info->use_count) != 0) {
		up_write(&dcssblk_devices_sem);
		PRINT_WARN("Segment %s is in use!\n", local_buf);
		rc = -EBUSY;
		goto out_buf;
	}
	list_del(&dev_info->lh);

	del_gendisk(dev_info->gd);
	blk_put_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(&dev_info->dev);
	segment_unload(dev_info->segment_name);
	PRINT_DEBUG("Segment %s unloaded successfully\n",
		    dev_info->segment_name);
	/* drop the reference taken in dcssblk_add_store() */
	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);

	rc = count;
out_buf:
	kfree(local_buf);
	return rc;
}
577
578static int
579dcssblk_open(struct inode *inode, struct file *filp)
580{
581 struct dcssblk_dev_info *dev_info;
582 int rc;
583
584 dev_info = inode->i_bdev->bd_disk->private_data;
585 if (NULL == dev_info) {
586 rc = -ENODEV;
587 goto out;
588 }
589 atomic_inc(&dev_info->use_count);
590 inode->i_bdev->bd_block_size = 4096;
591 rc = 0;
592out:
593 return rc;
594}
595
596static int
597dcssblk_release(struct inode *inode, struct file *filp)
598{
599 struct dcssblk_dev_info *dev_info;
600 int rc;
601
602 dev_info = inode->i_bdev->bd_disk->private_data;
603 if (NULL == dev_info) {
604 rc = -ENODEV;
605 goto out;
606 }
607 down_write(&dcssblk_devices_sem);
608 if (atomic_dec_and_test(&dev_info->use_count)
609 && (dev_info->save_pending)) {
610 PRINT_INFO("Segment %s became idle and is being saved now\n",
611 dev_info->segment_name);
612 segment_save(dev_info->segment_name);
613 dev_info->save_pending = 0;
614 }
615 up_write(&dcssblk_devices_sem);
616 rc = 0;
617out:
618 return rc;
619}
620
621static int
622dcssblk_make_request(request_queue_t *q, struct bio *bio)
623{
624 struct dcssblk_dev_info *dev_info;
625 struct bio_vec *bvec;
626 unsigned long index;
627 unsigned long page_addr;
628 unsigned long source_addr;
629 unsigned long bytes_done;
630 int i;
631
632 bytes_done = 0;
633 dev_info = bio->bi_bdev->bd_disk->private_data;
634 if (dev_info == NULL)
635 goto fail;
636 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
637 /* Request is not page-aligned. */
638 goto fail;
639 if (((bio->bi_size >> 9) + bio->bi_sector)
640 > get_capacity(bio->bi_bdev->bd_disk)) {
641 /* Request beyond end of DCSS segment. */
642 goto fail;
643 }
644 index = (bio->bi_sector >> 3);
645 bio_for_each_segment(bvec, bio, i) {
646 page_addr = (unsigned long)
647 page_address(bvec->bv_page) + bvec->bv_offset;
648 source_addr = dev_info->start + (index<<12) + bytes_done;
649 if (unlikely(page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0)
650 // More paranoia.
651 goto fail;
652 if (bio_data_dir(bio) == READ) {
653 memcpy((void*)page_addr, (void*)source_addr,
654 bvec->bv_len);
655 } else {
656 memcpy((void*)source_addr, (void*)page_addr,
657 bvec->bv_len);
658 }
659 bytes_done += bvec->bv_len;
660 }
661 bio_endio(bio, bytes_done, 0);
662 return 0;
663fail:
664 bio_io_error(bio, bytes_done);
665 return 0;
666}
667
/*
 * Parse the comma-separated "segments" module parameter and add each
 * named segment; a "(local)" suffix after a name switches that
 * segment to exclusive (non-shared) mode via dcssblk_shared_store().
 */
static void
dcssblk_check_params(void)
{
	int rc, i, j, k;
	char buf[9];
	struct dcssblk_dev_info *dev_info;

	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
	     i++) {
		/* copy one name (up to 8 chars, ends at ',', '(' or NUL) */
		for (j = i; (dcssblk_segments[j] != ',')  &&
			    (dcssblk_segments[j] != '\0') &&
			    (dcssblk_segments[j] != '(')  &&
			    (j - i) < 8; j++)
		{
			buf[j-i] = dcssblk_segments[j];
		}
		buf[j-i] = '\0';
		rc = dcssblk_add_store(dcssblk_root_dev, buf, j-i);
		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
			/* add_store() upper-cased its copy; match that here */
			for (k = 0; buf[k] != '\0'; k++)
				buf[k] = toupper(buf[k]);
			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
				down_read(&dcssblk_devices_sem);
				dev_info = dcssblk_get_device_by_name(buf);
				up_read(&dcssblk_devices_sem);
				if (dev_info)
					dcssblk_shared_store(&dev_info->dev,
							     "0\n", 2);
			}
		}
		/* skip over any suffix to the next separator */
		while ((dcssblk_segments[j] != ',')  &&
		       (dcssblk_segments[j] != '\0'))
		{
			j++;
		}
		if (dcssblk_segments[j] == '\0')
			break;
		i = j;
	}
}
708
709/*
710 * The init/exit functions.
711 */
712static void __exit
713dcssblk_exit(void)
714{
715 int rc;
716
717 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
718 s390_root_dev_unregister(dcssblk_root_dev);
719 rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
720 if (rc) {
721 PRINT_ERR("unregister_blkdev() failed!\n");
722 }
723 PRINT_DEBUG("...finished!\n");
724}
725
/*
 * Module init: register the root device with its add/remove
 * attributes, grab a dynamic block major, then process any segments
 * given via the "segments" parameter.
 */
static int __init
dcssblk_init(void)
{
	int rc;

	PRINT_DEBUG("DCSSBLOCK INIT...\n");
	dcssblk_root_dev = s390_root_dev_register("dcssblk");
	if (IS_ERR(dcssblk_root_dev)) {
		PRINT_ERR("device_register() failed!\n");
		return PTR_ERR(dcssblk_root_dev);
	}
	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
	if (rc) {
		PRINT_ERR("device_create_file(add) failed!\n");
		/* unregistering the root device also drops its attributes */
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
	if (rc) {
		PRINT_ERR("device_create_file(remove) failed!\n");
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	/* major 0 requests a dynamically assigned major number */
	rc = register_blkdev(0, DCSSBLK_NAME);
	if (rc < 0) {
		PRINT_ERR("Can't get dynamic major!\n");
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	dcssblk_major = rc;
	/* the semaphore must be usable before dcssblk_check_params()
	 * calls dcssblk_add_store() */
	init_rwsem(&dcssblk_devices_sem);

	dcssblk_check_params();

	PRINT_DEBUG("...finished!\n");
	return 0;
}
763
module_init(dcssblk_init);
module_exit(dcssblk_exit);

/* "segments" parameter, parsed by dcssblk_check_params() at init time. */
module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
		 "comma-separated list, each name max. 8 chars.\n"
		 "Adding \"(local)\" to segment name equals echoing 0 to "
		 "/sys/devices/dcssblk/<segment name>/shared after loading "
		 "the segment - \n"
		 "e.g. segments=\"mydcss1,mydcss2,mydcss3(local)\"");

MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
new file mode 100644
index 000000000000..d428c909b8a0
--- /dev/null
+++ b/drivers/s390/block/xpram.c
@@ -0,0 +1,539 @@
1/*
2 * Xpram.c -- the S/390 expanded memory RAM-disk
3 *
4 * significant parts of this code are based on
5 * the sbull device driver presented in
6 * A. Rubini: Linux Device Drivers
7 *
8 * Author of XPRAM specific coding: Reinhard Buendgen
9 * buendgen@de.ibm.com
10 * Rewrite for 2.5: Martin Schwidefsky <schwidefsky@de.ibm.com>
11 *
12 * External interfaces:
13 * Interfaces to linux kernel
14 * xpram_setup: read kernel parameters
15 * Device specific file operations
 16 *	xpram_ioctl
17 * xpram_open
18 *
19 * "ad-hoc" partitioning:
20 * the expanded memory can be partitioned among several devices
21 * (with different minors). The partitioning set up can be
22 * set by kernel or module parameters (int devs & int sizes[])
23 *
24 * Potential future improvements:
25 * generic hard disk support to replace ad-hoc partitioning
26 */
27
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30#include <linux/ctype.h> /* isdigit, isxdigit */
31#include <linux/errno.h>
32#include <linux/init.h>
33#include <linux/slab.h>
34#include <linux/blkdev.h>
35#include <linux/blkpg.h>
36#include <linux/hdreg.h> /* HDIO_GETGEO */
37#include <linux/sysdev.h>
38#include <linux/bio.h>
39#include <linux/devfs_fs_kernel.h>
40#include <asm/uaccess.h>
41
42#define XPRAM_NAME "xpram"
43#define XPRAM_DEVS 1 /* one partition */
44#define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */
45
46#define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x)
47#define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x)
48#define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x)
49#define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
50
51
/* sysdev class/device pair anchoring the driver in sysfs. */
static struct sysdev_class xpram_sysclass = {
	set_kset_name("xpram"),
};

static struct sys_device xpram_sys_device = {
	.id	= 0,
	.cls	= &xpram_sysclass,
};

/* Per-partition location within expanded storage (units: 4kB pages). */
typedef struct {
	unsigned int	size;		/* size of xpram segment in pages */
	unsigned int	offset; 	/* start page of xpram segment */
} xpram_device_t;

static xpram_device_t xpram_devices[XPRAM_MAX_DEVS];	/* per-partition state */
static unsigned int xpram_sizes[XPRAM_MAX_DEVS];	/* partition sizes in kB */
static struct gendisk *xpram_disks[XPRAM_MAX_DEVS];	/* one gendisk per partition */
static unsigned int xpram_pages;	/* expanded memory size in 4kB pages (probed at init) */
static int xpram_devs;			/* number of partitions actually in use */
71
72/*
73 * Parameter parsing functions.
74 */
75static int devs = XPRAM_DEVS;
76static unsigned int sizes[XPRAM_MAX_DEVS];
77
78module_param(devs, int, 0);
79module_param_array(sizes, int, NULL, 0);
80
81MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \
82 "the default is " __MODULE_STRING(XPRAM_DEVS) "\n");
83MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \
84 "the defaults are 0s \n" \
85 "All devices with size 0 equally partition the "
86 "remaining space on the expanded strorage not "
87 "claimed by explicit sizes\n");
88MODULE_LICENSE("GPL");
89
90#ifndef MODULE
91/*
92 * Parses the kernel parameters given in the kernel parameter line.
93 * The expected format is
94 * <number_of_partitions>[","<partition_size>]*
95 * where
96 * devices is a positive integer that initializes xpram_devs
97 * each size is a non-negative integer possibly followed by a
98 * magnitude (k,K,m,M,g,G), the list of sizes initialises
99 * xpram_sizes
100 *
101 * Arguments
102 * str: substring of kernel parameter line that contains xprams
103 * kernel parameters.
104 *
105 * Result 0 on success, -EINVAL else -- only for Version > 2.3
106 *
107 * Side effects
 108 *   the global variables devs is set to the value of
109 * <number_of_partitions> and sizes[i] is set to the i-th
110 * partition size (if provided). A parsing error of a value
111 * results in this value being set to -EINVAL.
112 */
static int __init xpram_setup (char *str)
{
	char *cp;
	int i;

	/* Leading integer: requested number of partitions. */
	devs = simple_strtoul(str, &cp, 10);
	if (cp <= str || devs > XPRAM_MAX_DEVS)
		return 0;
	/*
	 * Parse up to <devs> comma-separated sizes.  Note the side effect
	 * in the condition: *cp++ advances cp past the terminator even
	 * when it is not a comma (relevant for the checks below).
	 */
	for (i = 0; (i < devs) && (*cp++ == ','); i++) {
		sizes[i] = simple_strtoul(cp, &cp, 10);
		/* Optional magnitude suffix; sizes are kept in kB. */
		if (*cp == 'g' || *cp == 'G') {
			sizes[i] <<= 20;	/* GB -> kB */
			cp++;
		} else if (*cp == 'm' || *cp == 'M') {
			sizes[i] <<= 10;	/* MB -> kB */
			cp++;
		} else if (*cp == 'k' || *cp == 'K')
			cp++;
		while (isspace(*cp)) cp++;
	}
	/* Warn about unconsumed input: extra size entries or junk. */
	if (*cp == ',' && i >= devs)
		PRINT_WARN("partition sizes list has too many entries.\n");
	else if (*cp != 0)
		PRINT_WARN("ignored '%s' at end of parameter string.\n", cp);
	/* __setup() handlers return 1 to mark the option as consumed. */
	return 1;
}
139
140__setup("xpram_parts=", xpram_setup);
141#endif
142
143/*
144 * Copy expanded memory page (4kB) into main memory
145 * Arguments
146 * page_addr: address of target page
 147 *           xpage_index: index of expanded memory page
148 * Return value
149 * 0: if operation succeeds
150 * -EIO: if pgin failed
151 * -ENXIO: if xpram has vanished
152 */
static int xpram_page_in (unsigned long page_addr, unsigned int xpage_index)
{
	/* Condition code of PGIN; preloaded with 2, which PGIN itself
	 * never produces, so a trapped instruction is distinguishable. */
	int cc;

	__asm__ __volatile__ (
		"   lhi   %0,2\n"	/* return unused cc 2 if pgin traps */
		"   .insn rre,0xb22e0000,%1,%2\n"  /* pgin %1,%2 */
		"0: ipm   %0\n"
		"   srl   %0,28\n"
		"1:\n"
		/* Exception table entry maps a program check at 0: to the
		 * continuation label 1:, leaving the preloaded cc 2. */
#ifndef CONFIG_ARCH_S390X
		".section __ex_table,\"a\"\n"
		"   .align 4\n"
		"   .long  0b,1b\n"
		".previous"
#else
		".section __ex_table,\"a\"\n"
		"   .align 8\n"
		"   .quad  0b,1b\n"
		".previous"
#endif
		: "=&d" (cc)
		: "a" (__pa(page_addr)), "a" (xpage_index)
		: "cc" );
	if (cc == 3)		/* cc 3: expanded storage not available */
		return -ENXIO;
	if (cc == 2) {		/* instruction trapped: storage vanished */
		PRINT_ERR("expanded storage lost!\n");
		return -ENXIO;
	}
	if (cc == 1) {		/* cc 1: page-in operation failed */
		PRINT_ERR("page in failed for page index %u.\n",
			  xpage_index);
		return -EIO;
	}
	return 0;
}
190
191/*
192 * Copy a 4kB page of main memory to an expanded memory page
193 * Arguments
194 * page_addr: address of source page
 195 *           xpage_index: index of expanded memory page
196 * Return value
197 * 0: if operation succeeds
198 * -EIO: if pgout failed
199 * -ENXIO: if xpram has vanished
200 */
/* NOTE(review): returns long while the mirror-image xpram_page_in()
 * returns int; callers only compare against 0, but the types could be
 * unified. */
static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index)
{
	/* Condition code of PGOUT; preloaded with 2 (never produced by
	 * PGOUT itself) so a trapped instruction is distinguishable. */
	int cc;

	__asm__ __volatile__ (
		"   lhi   %0,2\n"	/* return unused cc 2 if pgout traps */
		"   .insn rre,0xb22f0000,%1,%2\n"  /* pgout %1,%2 */
		"0: ipm   %0\n"
		"   srl   %0,28\n"
		"1:\n"
		/* Exception table entry maps a program check at 0: to the
		 * continuation label 1:, leaving the preloaded cc 2. */
#ifndef CONFIG_ARCH_S390X
		".section __ex_table,\"a\"\n"
		"   .align 4\n"
		"   .long  0b,1b\n"
		".previous"
#else
		".section __ex_table,\"a\"\n"
		"   .align 8\n"
		"   .quad  0b,1b\n"
		".previous"
#endif
		: "=&d" (cc)
		: "a" (__pa(page_addr)), "a" (xpage_index)
		: "cc" );
	if (cc == 3)		/* cc 3: expanded storage not available */
		return -ENXIO;
	if (cc == 2) {		/* instruction trapped: storage vanished */
		PRINT_ERR("expanded storage lost!\n");
		return -ENXIO;
	}
	if (cc == 1) {		/* cc 1: page-out operation failed */
		PRINT_ERR("page out failed for page index %u.\n",
			  xpage_index);
		return -EIO;
	}
	return 0;
}
238
239/*
240 * Check if xpram is available.
241 */
242static int __init xpram_present(void)
243{
244 unsigned long mem_page;
245 int rc;
246
247 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
248 if (!mem_page)
249 return -ENOMEM;
250 rc = xpram_page_in(mem_page, 0);
251 free_page(mem_page);
252 return rc ? -ENXIO : 0;
253}
254
255/*
256 * Return index of the last available xpram page.
257 */
258static unsigned long __init xpram_highest_page_index(void)
259{
260 unsigned int page_index, add_bit;
261 unsigned long mem_page;
262
263 mem_page = (unsigned long) __get_free_page(GFP_KERNEL);
264 if (!mem_page)
265 return 0;
266
267 page_index = 0;
268 add_bit = 1ULL << (sizeof(unsigned int)*8 - 1);
269 while (add_bit > 0) {
270 if (xpram_page_in(mem_page, page_index | add_bit) == 0)
271 page_index |= add_bit;
272 add_bit >>= 1;
273 }
274
275 free_page (mem_page);
276
277 return page_index;
278}
279
280/*
281 * Block device make request function.
282 */
/*
 * Block-device make_request function: service a bio synchronously by
 * copying each 4kB chunk between main memory and expanded storage;
 * there is no request queueing.  Only whole, page-aligned requests
 * within the partition are accepted.
 */
static int xpram_make_request(request_queue_t *q, struct bio *bio)
{
	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
	struct bio_vec *bvec;
	unsigned int index;	/* current xpram page index */
	unsigned long page_addr;
	unsigned long bytes;
	int i;

	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
		/* Request is not page-aligned. */
		goto fail;
	if ((bio->bi_size >> 12) > xdev->size)
		/* Request exceeds the size of the partition. */
		goto fail;
	if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset)
		/* Start page index would overflow 32 bits. */
		goto fail;
	index = (bio->bi_sector >> 3) + xdev->offset;
	bio_for_each_segment(bvec, bio, i) {
		/* NOTE(review): kmap() without a matching kunmap(); only
		 * harmless if bio pages can never be highmem -- confirm. */
		page_addr = (unsigned long)
			kmap(bvec->bv_page) + bvec->bv_offset;
		bytes = bvec->bv_len;
		if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
			/* More paranoia. */
			goto fail;
		/* Transfer the segment one 4kB page at a time. */
		while (bytes > 0) {
			if (bio_data_dir(bio) == READ) {
				if (xpram_page_in(page_addr, index) != 0)
					goto fail;
			} else {
				if (xpram_page_out(page_addr, index) != 0)
					goto fail;
			}
			page_addr += 4096;
			bytes -= 4096;
			index++;
		}
	}
	/* Complete the bio by hand (no block-layer completion helper). */
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bytes = bio->bi_size;
	bio->bi_size = 0;
	bio->bi_end_io(bio, bytes, 0);
	return 0;
fail:
	bio_io_error(bio, bio->bi_size);
	return 0;
}
330
331static int xpram_ioctl (struct inode *inode, struct file *filp,
332 unsigned int cmd, unsigned long arg)
333{
334 struct hd_geometry __user *geo;
335 unsigned long size;
336 if (cmd != HDIO_GETGEO)
337 return -EINVAL;
338 /*
339 * get geometry: we have to fake one... trim the size to a
340 * multiple of 64 (32k): tell we have 16 sectors, 4 heads,
341 * whatever cylinders. Tell also that data starts at sector. 4.
342 */
343 geo = (struct hd_geometry __user *) arg;
344 size = (xpram_pages * 8) & ~0x3f;
345 put_user(size >> 6, &geo->cylinders);
346 put_user(4, &geo->heads);
347 put_user(16, &geo->sectors);
348 put_user(4, &geo->start);
349 return 0;
350}
351
/* Block device operations: only the HDIO_GETGEO ioctl is implemented. */
static struct block_device_operations xpram_devops =
{
	.owner	= THIS_MODULE,
	.ioctl	= xpram_ioctl,
};
357
358/*
359 * Setup xpram_sizes array.
360 */
361static int __init xpram_setup_sizes(unsigned long pages)
362{
363 unsigned long mem_needed;
364 unsigned long mem_auto;
365 int mem_auto_no;
366 int i;
367
368 /* Check number of devices. */
369 if (devs <= 0 || devs > XPRAM_MAX_DEVS) {
370 PRINT_ERR("invalid number %d of devices\n",devs);
371 return -EINVAL;
372 }
373 xpram_devs = devs;
374
375 /*
376 * Copy sizes array to xpram_sizes and align partition
377 * sizes to page boundary.
378 */
379 mem_needed = 0;
380 mem_auto_no = 0;
381 for (i = 0; i < xpram_devs; i++) {
382 xpram_sizes[i] = (sizes[i] + 3) & -4UL;
383 if (xpram_sizes[i])
384 mem_needed += xpram_sizes[i];
385 else
386 mem_auto_no++;
387 }
388
389 PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs);
390 for (i = 0; i < xpram_devs; i++) {
391 if (xpram_sizes[i])
392 PRINT_INFO(" size of partition %d: %u kB\n",
393 i, xpram_sizes[i]);
394 else
395 PRINT_INFO(" size of partition %d to be set "
396 "automatically\n",i);
397 }
398 PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n",
399 mem_needed);
400 PRINT_DEBUG(" partitions to be sized automatically: %d\n",
401 mem_auto_no);
402
403 if (mem_needed > pages * 4) {
404 PRINT_ERR("Not enough expanded memory available\n");
405 return -EINVAL;
406 }
407
408 /*
409 * partitioning:
410 * xpram_sizes[i] != 0; partition i has size xpram_sizes[i] kB
411 * else: ; all partitions with zero xpram_sizes[i]
412 * partition equally the remaining space
413 */
414 if (mem_auto_no) {
415 mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4;
416 PRINT_INFO(" automatically determined "
417 "partition size: %lu kB\n", mem_auto);
418 for (i = 0; i < xpram_devs; i++)
419 if (xpram_sizes[i] == 0)
420 xpram_sizes[i] = mem_auto;
421 }
422 return 0;
423}
424
/* Single request queue shared by all xpram gendisks. */
static struct request_queue *xpram_queue;
426
/*
 * Allocate gendisks, register the xpram block major, set up the shared
 * request queue and add one "slram<i>" disk per partition.  On failure
 * all previously acquired resources are released again.
 * Returns 0 on success or a negative errno.
 */
static int __init xpram_setup_blkdev(void)
{
	unsigned long offset;
	int i, rc = -ENOMEM;

	/* One single-minor gendisk per partition. */
	for (i = 0; i < xpram_devs; i++) {
		struct gendisk *disk = alloc_disk(1);
		if (!disk)
			goto out;
		xpram_disks[i] = disk;
	}

	/*
	 * Register xpram major.
	 */
	rc = register_blkdev(XPRAM_MAJOR, XPRAM_NAME);
	if (rc < 0)
		goto out;

	devfs_mk_dir("slram");

	/*
	 * Assign the other needed values: make request function, sizes and
	 * hardsect size. All the minor devices feature the same value.
	 */
	xpram_queue = blk_alloc_queue(GFP_KERNEL);
	if (!xpram_queue) {
		rc = -ENOMEM;
		goto out_unreg;
	}
	blk_queue_make_request(xpram_queue, xpram_make_request);
	blk_queue_hardsect_size(xpram_queue, 4096);

	/*
	 * Setup device structures.
	 */
	offset = 0;
	for (i = 0; i < xpram_devs; i++) {
		struct gendisk *disk = xpram_disks[i];

		/* size is in kB; /4 converts to 4kB pages */
		xpram_devices[i].size = xpram_sizes[i] / 4;
		xpram_devices[i].offset = offset;
		offset += xpram_devices[i].size;
		disk->major = XPRAM_MAJOR;
		disk->first_minor = i;
		disk->fops = &xpram_devops;
		disk->private_data = &xpram_devices[i];
		disk->queue = xpram_queue;
		sprintf(disk->disk_name, "slram%d", i);
		sprintf(disk->devfs_name, "slram/%d", i);
		/* capacity is in 512-byte sectors; kB << 1 == sectors */
		set_capacity(disk, xpram_sizes[i] << 1);
		add_disk(disk);
	}

	return 0;
out_unreg:
	devfs_remove("slram");
	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
out:
	/* i is the count of successfully allocated disks at this point */
	while (i--)
		put_disk(xpram_disks[i]);
	return rc;
}
490
491/*
492 * Finally, the init/exit functions.
493 */
494static void __exit xpram_exit(void)
495{
496 int i;
497 for (i = 0; i < xpram_devs; i++) {
498 del_gendisk(xpram_disks[i]);
499 put_disk(xpram_disks[i]);
500 }
501 unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
502 devfs_remove("slram");
503 blk_cleanup_queue(xpram_queue);
504 sysdev_unregister(&xpram_sys_device);
505 sysdev_class_unregister(&xpram_sysclass);
506}
507
508static int __init xpram_init(void)
509{
510 int rc;
511
512 /* Find out size of expanded memory. */
513 if (xpram_present() != 0) {
514 PRINT_WARN("No expanded memory available\n");
515 return -ENODEV;
516 }
517 xpram_pages = xpram_highest_page_index();
518 PRINT_INFO(" %u pages expanded memory found (%lu KB).\n",
519 xpram_pages, (unsigned long) xpram_pages*4);
520 rc = xpram_setup_sizes(xpram_pages);
521 if (rc)
522 return rc;
523 rc = sysdev_class_register(&xpram_sysclass);
524 if (rc)
525 return rc;
526
527 rc = sysdev_register(&xpram_sys_device);
528 if (rc) {
529 sysdev_class_unregister(&xpram_sysclass);
530 return rc;
531 }
532 rc = xpram_setup_blkdev();
533 if (rc)
534 sysdev_unregister(&xpram_sys_device);
535 return rc;
536}
537
/* Hook init/exit into the module infrastructure. */
module_init(xpram_init);
module_exit(xpram_exit);