author	Stefan Weinhuber <wein@de.ibm.com>	2008-01-26 08:11:23 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2008-01-26 08:11:28 -0500
commit	8e09f21574ea3028d5629e5de759e0b196c690c5 (patch)
tree	ced4feb1847ee6c2a7b7b4cec8f3118f83d3a386 /drivers/s390/block/dasd.c
parent	0ac30be461084f30ad6e22c6b91347e880ed41aa (diff)
[S390] dasd: add hyper PAV support to DASD device driver, part 1
Parallel access volumes (PAV) is a storage server feature that allows multiple channel programs to be started on the same DASD in parallel. It defines alias devices that can be used as alternative paths to the same disk.

With the old base PAV support we only needed rudimentary functionality in the DASD device driver: as the mapping between base and alias devices was static, we just had to export an identifier (uid) and could leave the combining of devices to external layers like a device mapper multipath.

Hyper PAV removes the requirement to dedicate alias devices to specific base devices. Instead, each alias device can be combined with multiple base devices on a per-request basis. This requires full support in the DASD device driver, as each channel program itself now has to identify the target base device.

The changes to the DASD device driver and the ECKD discipline are:
- Separate the subchannel device representation (dasd_device) from the block device representation (dasd_block). Only base devices are block devices.
- Gather information about base and alias devices and the possible combinations.
- For each request, decide which dasd_device should be used (base or alias) and build a specific channel program.
- Support summary unit checks, which allow the storage server to upgrade/downgrade between base and hyper PAV at runtime (support is mandatory).

Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
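To make the restructuring easier to follow before reading the diff, here is a deliberately simplified, standalone C sketch of the idea. This is illustrative code, not code from the patch: apart from the names dasd_device and dasd_block, all identifiers, fields, and the round-robin policy are assumptions. A dasd_block represents the disk as seen by the block layer, while the channel program for each request may be started on the base dasd_device or on any currently usable alias dasd_device.

#include <stdio.h>

/* Simplified model: a dasd_device is one subchannel path (base or alias),
 * a dasd_block is the disk exposed to the block layer. */
struct dasd_device {
	const char *busid;
	int is_alias;
};

struct dasd_block {
	struct dasd_device *base;     /* only base devices own a dasd_block */
	struct dasd_device *alias[2]; /* hyper PAV: aliases are no longer dedicated */
	int nalias;
	unsigned int next;            /* trivial round-robin cursor (illustrative) */
};

/* Per request, pick the device the channel program will be started on:
 * the base device or one of the currently usable aliases. */
static struct dasd_device *pick_startdev(struct dasd_block *block)
{
	unsigned int i = block->next++ % (block->nalias + 1);
	return i == 0 ? block->base : block->alias[i - 1];
}

int main(void)
{
	struct dasd_device base = { "0.0.4711", 0 };
	struct dasd_device a1 = { "0.0.47fe", 1 }, a2 = { "0.0.47ff", 1 };
	struct dasd_block disk = { &base, { &a1, &a2 }, 2, 0 };

	for (int req = 0; req < 4; req++)
		printf("request %d starts on %s\n",
		       req, pick_startdev(&disk)->busid);
	return 0;
}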
Diffstat (limited to 'drivers/s390/block/dasd.c')
-rw-r--r--	drivers/s390/block/dasd.c	1680
1 file changed, 944 insertions(+), 736 deletions(-)
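One more orientation aid for the new __dasd_device_process_final_queue function in the diff below: the device layer now translates the raw completion status of a request into the status the submitting layer acts on. A minimal sketch of that translation follows; the enum values and names are merely illustrative (the real definitions live in the driver's internal headers), only the mapping itself is taken from the patch.

#include <assert.h>
#include <stdio.h>

/* Illustrative request states; the driver's real values differ. */
enum cqr_status {
	DASD_CQR_SUCCESS,	/* i/o completed on the channel */
	DASD_CQR_ERROR,		/* i/o failed, sense data may be available */
	DASD_CQR_CLEARED,	/* i/o was terminated (flush, stop) */
	DASD_CQR_DONE,		/* reported back: completed */
	DASD_CQR_NEED_ERP,	/* reported back: run error recovery */
	DASD_CQR_TERMINATED,	/* reported back: requeue or fail */
};

/* Mapping applied before the callback is invoked, as in the patch. */
static enum cqr_status finalize(enum cqr_status s)
{
	switch (s) {
	case DASD_CQR_SUCCESS:	return DASD_CQR_DONE;
	case DASD_CQR_ERROR:	return DASD_CQR_NEED_ERP;
	case DASD_CQR_CLEARED:	return DASD_CQR_TERMINATED;
	default:		assert(0); /* BUG() in the driver */
	}
	return s;
}

int main(void)
{
	printf("%d %d %d\n", finalize(DASD_CQR_SUCCESS),
	       finalize(DASD_CQR_ERROR), finalize(DASD_CQR_CLEARED));
	return 0;
}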
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index da4fe1ecef9e..db9193d38986 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL");
 /*
  * SECTION: prototypes for static functions of dasd.c
  */
-static int dasd_alloc_queue(struct dasd_device * device);
-static void dasd_setup_queue(struct dasd_device * device);
-static void dasd_free_queue(struct dasd_device * device);
-static void dasd_flush_request_queue(struct dasd_device *);
-static int dasd_flush_ccw_queue(struct dasd_device *, int);
-static void dasd_tasklet(struct dasd_device *);
+static int dasd_alloc_queue(struct dasd_block *);
+static void dasd_setup_queue(struct dasd_block *);
+static void dasd_free_queue(struct dasd_block *);
+static void dasd_flush_request_queue(struct dasd_block *);
+static int dasd_flush_block_queue(struct dasd_block *);
+static void dasd_device_tasklet(struct dasd_device *);
+static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
+static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq;
 /*
  * Allocate memory for a new device structure.
  */
-struct dasd_device *
-dasd_alloc_device(void)
+struct dasd_device *dasd_alloc_device(void)
 {
 	struct dasd_device *device;
 
-	device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
-	if (device == NULL)
+	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
+	if (!device)
 		return ERR_PTR(-ENOMEM);
-	/* open_count = 0 means device online but not in use */
-	atomic_set(&device->open_count, -1);
 
 	/* Get two pages for normal block device operations. */
 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
-	if (device->ccw_mem == NULL) {
+	if (!device->ccw_mem) {
 		kfree(device);
 		return ERR_PTR(-ENOMEM);
 	}
 	/* Get one page for error recovery. */
 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
-	if (device->erp_mem == NULL) {
+	if (!device->erp_mem) {
 		free_pages((unsigned long) device->ccw_mem, 1);
 		kfree(device);
 		return ERR_PTR(-ENOMEM);
@@ -93,10 +92,9 @@ dasd_alloc_device(void)
 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
 	spin_lock_init(&device->mem_lock);
-	spin_lock_init(&device->request_queue_lock);
-	atomic_set (&device->tasklet_scheduled, 0);
+	atomic_set(&device->tasklet_scheduled, 0);
 	tasklet_init(&device->tasklet,
-		     (void (*)(unsigned long)) dasd_tasklet,
+		     (void (*)(unsigned long)) dasd_device_tasklet,
 		     (unsigned long) device);
 	INIT_LIST_HEAD(&device->ccw_queue);
 	init_timer(&device->timer);
@@ -110,8 +108,7 @@ dasd_alloc_device(void)
 /*
  * Free memory of a device structure.
  */
-void
-dasd_free_device(struct dasd_device *device)
+void dasd_free_device(struct dasd_device *device)
 {
 	kfree(device->private);
 	free_page((unsigned long) device->erp_mem);
@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device)
 }
 
 /*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_block *dasd_alloc_block(void)
+{
+	struct dasd_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_ATOMIC);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+	/* open_count = 0 means device online but not in use */
+	atomic_set(&block->open_count, -1);
+
+	spin_lock_init(&block->request_queue_lock);
+	atomic_set(&block->tasklet_scheduled, 0);
+	tasklet_init(&block->tasklet,
+		     (void (*)(unsigned long)) dasd_block_tasklet,
+		     (unsigned long) block);
+	INIT_LIST_HEAD(&block->ccw_queue);
+	spin_lock_init(&block->queue_lock);
+	init_timer(&block->timer);
+
+	return block;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void dasd_free_block(struct dasd_block *block)
+{
+	kfree(block);
+}
+
+/*
  * Make a new device known to the system.
  */
-static int
-dasd_state_new_to_known(struct dasd_device *device)
+static int dasd_state_new_to_known(struct dasd_device *device)
 {
 	int rc;
 
@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device)
 	 */
 	dasd_get_device(device);
 
-	rc = dasd_alloc_queue(device);
-	if (rc) {
-		dasd_put_device(device);
-		return rc;
+	if (device->block) {
+		rc = dasd_alloc_queue(device->block);
+		if (rc) {
+			dasd_put_device(device);
+			return rc;
+		}
 	}
-
 	device->state = DASD_STATE_KNOWN;
 	return 0;
 }
@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device)
 /*
  * Let the system forget about a device.
  */
-static int
-dasd_state_known_to_new(struct dasd_device * device)
+static int dasd_state_known_to_new(struct dasd_device *device)
 {
 	/* Disable extended error reporting for this device. */
 	dasd_eer_disable(device);
 	/* Forget the discipline information. */
-	if (device->discipline)
+	if (device->discipline) {
+		if (device->discipline->uncheck_device)
+			device->discipline->uncheck_device(device);
 		module_put(device->discipline->owner);
+	}
 	device->discipline = NULL;
 	if (device->base_discipline)
 		module_put(device->base_discipline->owner);
 	device->base_discipline = NULL;
 	device->state = DASD_STATE_NEW;
 
-	dasd_free_queue(device);
+	if (device->block)
+		dasd_free_queue(device->block);
 
 	/* Give up reference we took in dasd_state_new_to_known. */
 	dasd_put_device(device);
@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device)
 /*
  * Request the irq line for the device.
  */
-static int
-dasd_state_known_to_basic(struct dasd_device * device)
+static int dasd_state_known_to_basic(struct dasd_device *device)
 {
 	int rc;
 
 	/* Allocate and register gendisk structure. */
-	rc = dasd_gendisk_alloc(device);
-	if (rc)
-		return rc;
-
+	if (device->block) {
+		rc = dasd_gendisk_alloc(device->block);
+		if (rc)
+			return rc;
+	}
 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
 	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
-					    8 * sizeof (long));
+					    8 * sizeof(long));
 	debug_register_view(device->debug_area, &debug_sprintf_view);
 	debug_set_level(device->debug_area, DBF_WARNING);
 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device)
 /*
  * Release the irq line for the device. Terminate any running i/o.
  */
-static int
-dasd_state_basic_to_known(struct dasd_device * device)
+static int dasd_state_basic_to_known(struct dasd_device *device)
 {
 	int rc;
-
-	dasd_gendisk_free(device);
-	rc = dasd_flush_ccw_queue(device, 1);
+	if (device->block) {
+		dasd_gendisk_free(device->block);
+		dasd_block_clear_timer(device->block);
+	}
+	rc = dasd_flush_device_queue(device);
 	if (rc)
 		return rc;
-	dasd_clear_timer(device);
+	dasd_device_clear_timer(device);
 
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device)
  * In case the analysis returns an error, the device setup is stopped
  * (a fake disk was already added to allow formatting).
  */
-static int
-dasd_state_basic_to_ready(struct dasd_device * device)
+static int dasd_state_basic_to_ready(struct dasd_device *device)
 {
 	int rc;
+	struct dasd_block *block;
 
 	rc = 0;
-	if (device->discipline->do_analysis != NULL)
-		rc = device->discipline->do_analysis(device);
-	if (rc) {
-		if (rc != -EAGAIN)
-			device->state = DASD_STATE_UNFMT;
-		return rc;
-	}
+	block = device->block;
 	/* make disk known with correct capacity */
-	dasd_setup_queue(device);
-	set_capacity(device->gdp, device->blocks << device->s2b_shift);
-	device->state = DASD_STATE_READY;
-	rc = dasd_scan_partitions(device);
-	if (rc)
-		device->state = DASD_STATE_BASIC;
+	if (block) {
+		if (block->base->discipline->do_analysis != NULL)
+			rc = block->base->discipline->do_analysis(block);
+		if (rc) {
+			if (rc != -EAGAIN)
+				device->state = DASD_STATE_UNFMT;
+			return rc;
+		}
+		dasd_setup_queue(block);
+		set_capacity(block->gdp,
+			     block->blocks << block->s2b_shift);
+		device->state = DASD_STATE_READY;
+		rc = dasd_scan_partitions(block);
+		if (rc)
+			device->state = DASD_STATE_BASIC;
+	} else {
+		device->state = DASD_STATE_READY;
+	}
 	return rc;
 }
 
@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
  * Forget format information. Check if the target level is basic
  * and if it is create fake disk for formatting.
  */
-static int
-dasd_state_ready_to_basic(struct dasd_device * device)
+static int dasd_state_ready_to_basic(struct dasd_device *device)
 {
 	int rc;
 
-	rc = dasd_flush_ccw_queue(device, 0);
-	if (rc)
-		return rc;
-	dasd_destroy_partitions(device);
-	dasd_flush_request_queue(device);
-	device->blocks = 0;
-	device->bp_block = 0;
-	device->s2b_shift = 0;
 	device->state = DASD_STATE_BASIC;
+	if (device->block) {
+		struct dasd_block *block = device->block;
+		rc = dasd_flush_block_queue(block);
+		if (rc) {
+			device->state = DASD_STATE_READY;
+			return rc;
+		}
+		dasd_destroy_partitions(block);
+		dasd_flush_request_queue(block);
+		block->blocks = 0;
+		block->bp_block = 0;
+		block->s2b_shift = 0;
+	}
 	return 0;
 }
 
 /*
  * Back to basic.
  */
-static int
-dasd_state_unfmt_to_basic(struct dasd_device * device)
+static int dasd_state_unfmt_to_basic(struct dasd_device *device)
 {
 	device->state = DASD_STATE_BASIC;
 	return 0;
@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
 static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
+	int rc;
+
+	if (device->discipline->ready_to_online) {
+		rc = device->discipline->ready_to_online(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_ONLINE;
-	dasd_schedule_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
 	return 0;
 }
 
 /*
  * Stop the requeueing of requests again.
  */
-static int
-dasd_state_online_to_ready(struct dasd_device * device)
+static int dasd_state_online_to_ready(struct dasd_device *device)
 {
+	int rc;
+
+	if (device->discipline->online_to_ready) {
+		rc = device->discipline->online_to_ready(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_READY;
 	return 0;
 }
@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device)
 /*
  * Device startup state changes.
  */
-static int
-dasd_increase_state(struct dasd_device *device)
+static int dasd_increase_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device)
 /*
  * Device shutdown state changes.
  */
-static int
-dasd_decrease_state(struct dasd_device *device)
+static int dasd_decrease_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device)
 /*
  * This is the main startup/shutdown routine.
  */
-static void
-dasd_change_state(struct dasd_device *device)
+static void dasd_change_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device)
  * dasd_kick_device will schedule a call do do_kick_device to the kernel
  * event daemon.
  */
-static void
-do_kick_device(struct work_struct *work)
+static void do_kick_device(struct work_struct *work)
 {
 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
 	dasd_change_state(device);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	dasd_put_device(device);
 }
 
-void
-dasd_kick_device(struct dasd_device *device)
+void dasd_kick_device(struct dasd_device *device)
 {
 	dasd_get_device(device);
 	/* queue call to dasd_kick_device to the kernel event daemon. */
@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device)
 /*
  * Set the target state for a device and starts the state change.
  */
-void
-dasd_set_target_state(struct dasd_device *device, int target)
+void dasd_set_target_state(struct dasd_device *device, int target)
 {
 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
 	if (dasd_probeonly && target > DASD_STATE_READY)
@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target)
 /*
  * Enable devices with device numbers in [from..to].
  */
-static inline int
-_wait_for_device(struct dasd_device *device)
+static inline int _wait_for_device(struct dasd_device *device)
 {
 	return (device->state == device->target);
 }
 
-void
-dasd_enable_device(struct dasd_device *device)
+void dasd_enable_device(struct dasd_device *device)
 {
 	dasd_set_target_state(device, DASD_STATE_ONLINE);
 	if (device->state <= DASD_STATE_KNOWN)
@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
 /*
  * Increments counter in global and local profiling structures.
  */
-#define dasd_profile_counter(value, counter, device) \
+#define dasd_profile_counter(value, counter, block) \
 { \
 	int index; \
 	for (index = 0; index < 31 && value >> (2+index); index++); \
 	dasd_global_profile.counter[index]++; \
-	device->profile.counter[index]++; \
+	block->profile.counter[index]++; \
 }
 
 /*
  * Add profiling information for cqr before execution.
  */
-static void
-dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
-		   struct request *req)
+static void dasd_profile_start(struct dasd_block *block,
+			       struct dasd_ccw_req *cqr,
+			       struct request *req)
 {
 	struct list_head *l;
 	unsigned int counter;
@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
 
 	/* count the length of the chanq for statistics */
 	counter = 0;
-	list_for_each(l, &device->ccw_queue)
+	list_for_each(l, &block->ccw_queue)
 		if (++counter >= 31)
 			break;
 	dasd_global_profile.dasd_io_nr_req[counter]++;
-	device->profile.dasd_io_nr_req[counter]++;
+	block->profile.dasd_io_nr_req[counter]++;
 }
 
 /*
  * Add profiling information for cqr after execution.
  */
-static void
-dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
-		 struct request *req)
+static void dasd_profile_end(struct dasd_block *block,
+			     struct dasd_ccw_req *cqr,
+			     struct request *req)
 {
 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
 	long tottimeps, sectors;
@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
 
 	if (!dasd_global_profile.dasd_io_reqs)
 		memset(&dasd_global_profile, 0,
-		       sizeof (struct dasd_profile_info_t));
+		       sizeof(struct dasd_profile_info_t));
 	dasd_global_profile.dasd_io_reqs++;
 	dasd_global_profile.dasd_io_sects += sectors;
 
-	if (!device->profile.dasd_io_reqs)
-		memset(&device->profile, 0,
-		       sizeof (struct dasd_profile_info_t));
-	device->profile.dasd_io_reqs++;
-	device->profile.dasd_io_sects += sectors;
+	if (!block->profile.dasd_io_reqs)
+		memset(&block->profile, 0,
+		       sizeof(struct dasd_profile_info_t));
+	block->profile.dasd_io_reqs++;
+	block->profile.dasd_io_sects += sectors;
 
-	dasd_profile_counter(sectors, dasd_io_secs, device);
-	dasd_profile_counter(tottime, dasd_io_times, device);
-	dasd_profile_counter(tottimeps, dasd_io_timps, device);
-	dasd_profile_counter(strtime, dasd_io_time1, device);
-	dasd_profile_counter(irqtime, dasd_io_time2, device);
-	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
-	dasd_profile_counter(endtime, dasd_io_time3, device);
+	dasd_profile_counter(sectors, dasd_io_secs, block);
+	dasd_profile_counter(tottime, dasd_io_times, block);
+	dasd_profile_counter(tottimeps, dasd_io_timps, block);
+	dasd_profile_counter(strtime, dasd_io_time1, block);
+	dasd_profile_counter(irqtime, dasd_io_time2, block);
+	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
+	dasd_profile_counter(endtime, dasd_io_time3, block);
 }
 #else
-#define dasd_profile_start(device, cqr, req) do {} while (0)
-#define dasd_profile_end(device, cqr, req) do {} while (0)
+#define dasd_profile_start(block, cqr, req) do {} while (0)
+#define dasd_profile_end(block, cqr, req) do {} while (0)
 #endif /* CONFIG_DASD_PROFILE */
 
 /*
@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
  * memory and 2) dasd_smalloc_request uses the static ccw memory
  * that gets allocated for each device.
  */
-struct dasd_ccw_req *
-dasd_kmalloc_request(char *magic, int cplength, int datasize,
-		     struct dasd_device * device)
+struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 
@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize,
 	return cqr;
 }
 
-struct dasd_ccw_req *
-dasd_smalloc_request(char *magic, int cplength, int datasize,
-		     struct dasd_device * device)
+struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
 {
 	unsigned long flags;
 	struct dasd_ccw_req *cqr;
@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
  * idal lists that might have been created by dasd_set_cda and the
  * struct dasd_ccw_req itself.
  */
-void
-dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
 #ifdef CONFIG_64BIT
 	struct ccw1 *ccw;
@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
 	dasd_put_device(device);
 }
 
-void
-dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
 	unsigned long flags;
 
@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
 /*
  * Check discipline magic in cqr.
  */
-static inline int
-dasd_check_cqr(struct dasd_ccw_req *cqr)
+static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 
 	if (cqr == NULL)
 		return -EINVAL;
-	device = cqr->device;
+	device = cqr->startdev;
 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
 		DEV_MESSAGE(KERN_WARNING, device,
 			    " dasd_ccw_req 0x%08x magic doesn't match"
@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr)
  * ccw_device_clear can fail if the i/o subsystem
  * is in a bad mood.
  */
-int
-dasd_term_IO(struct dasd_ccw_req * cqr)
+int dasd_term_IO(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	int retries, rc;
@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 	if (rc)
 		return rc;
 	retries = 0;
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
 		rc = ccw_device_clear(device->cdev, (long) cqr);
 		switch (rc) {
 		case 0:	/* termination successful */
 			cqr->retries--;
-			cqr->status = DASD_CQR_CLEAR;
+			cqr->status = DASD_CQR_CLEAR_PENDING;
 			cqr->stopclk = get_clock();
 			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 		}
 		retries++;
 	}
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	return rc;
 }
 
@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
  * Start the i/o. This start_IO can fail if the channel is really busy.
  * In that case set up a timer to start the request later.
  */
-int
-dasd_start_IO(struct dasd_ccw_req * cqr)
+int dasd_start_IO(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	int rc;
@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
 	rc = dasd_check_cqr(cqr);
 	if (rc)
 		return rc;
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	if (cqr->retries < 0) {
 		DEV_MESSAGE(KERN_DEBUG, device,
 			    "start_IO: request %p (%02x/%i) - no retry left.",
 			    cqr, cqr->status, cqr->retries);
-		cqr->status = DASD_CQR_FAILED;
+		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
 	cqr->startclk = get_clock();
@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
  * DASD_CQR_QUEUED for 2) and 3).
  */
-static void
-dasd_timeout_device(unsigned long ptr)
+static void dasd_device_timeout(unsigned long ptr)
 {
 	unsigned long flags;
 	struct dasd_device *device;
@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr)
 	/* re-activate request queue */
 	device->stopped &= ~DASD_STOPPED_PENDING;
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 }
 
 /*
  * Setup timeout for a device in jiffies.
  */
-void
-dasd_set_timer(struct dasd_device *device, int expires)
+void dasd_device_set_timer(struct dasd_device *device, int expires)
 {
 	if (expires == 0) {
 		if (timer_pending(&device->timer))
@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires)
 		if (mod_timer(&device->timer, jiffies + expires))
 			return;
 	}
-	device->timer.function = dasd_timeout_device;
+	device->timer.function = dasd_device_timeout;
 	device->timer.data = (unsigned long) device;
 	device->timer.expires = jiffies + expires;
 	add_timer(&device->timer);
@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires)
 /*
  * Clear timeout for a device.
  */
-void
-dasd_clear_timer(struct dasd_device *device)
+void dasd_device_clear_timer(struct dasd_device *device)
 {
 	if (timer_pending(&device->timer))
 		del_timer(&device->timer);
 }
 
-static void
-dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
+static void dasd_handle_killed_request(struct ccw_device *cdev,
+				       unsigned long intparm)
 {
 	struct dasd_ccw_req *cqr;
 	struct dasd_device *device;
@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
 		return;
 	}
 
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	if (device == NULL ||
 	    device != dasd_device_from_cdev_locked(cdev) ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
 	/* Schedule request to be retried. */
 	cqr->status = DASD_CQR_QUEUED;
 
-	dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
 	dasd_put_device(device);
 }
 
-static void
-dasd_handle_state_change_pending(struct dasd_device *device)
+void dasd_generic_handle_state_change(struct dasd_device *device)
 {
-	struct dasd_ccw_req *cqr;
-	struct list_head *l, *n;
-
 	/* First of all start sense subsystem status request. */
 	dasd_eer_snss(device);
 
 	device->stopped &= ~DASD_STOPPED_PENDING;
-
-	/* restart all 'running' IO on queue */
-	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
-		if (cqr->status == DASD_CQR_IN_IO) {
-			cqr->status = DASD_CQR_QUEUED;
-		}
-	}
-	dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
 }
 
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
  */
-void
-dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
-		 struct irb *irb)
+void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
 {
 	struct dasd_ccw_req *cqr, *next;
 	struct dasd_device *device;
 	unsigned long long now;
 	int expires;
-	dasd_era_t era;
-	char mask;
 
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		      cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
 		      (unsigned int) intparm);
 
-	/* first of all check for state change pending interrupt */
-	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	/* check for unsolicited interrupts */
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (!cqr || ((irb->scsw.cc == 1) &&
+		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
+		if (cqr && cqr->status == DASD_CQR_IN_IO)
+			cqr->status = DASD_CQR_QUEUED;
 		device = dasd_device_from_cdev_locked(cdev);
 		if (!IS_ERR(device)) {
-			dasd_handle_state_change_pending(device);
+			dasd_device_clear_timer(device);
+			device->discipline->handle_unsolicited_interrupt(device,
+									 irb);
 			dasd_put_device(device);
 		}
 		return;
 	}
 
-	cqr = (struct dasd_ccw_req *) intparm;
-
-	/* check for unsolicited interrupts */
-	if (cqr == NULL) {
-		MESSAGE(KERN_DEBUG,
-			"unsolicited interrupt received: bus_id %s",
-			cdev->dev.bus_id);
-		return;
-	}
-
-	device = (struct dasd_device *) cqr->device;
-	if (device == NULL ||
+	device = (struct dasd_device *) cqr->startdev;
+	if (!device ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
 			cdev->dev.bus_id);
@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 
 	/* Check for clear pending */
-	if (cqr->status == DASD_CQR_CLEAR &&
+	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
 	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
-		cqr->status = DASD_CQR_QUEUED;
-		dasd_clear_timer(device);
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
-		dasd_schedule_bh(device);
+		dasd_schedule_device_bh(device);
 		return;
 	}
 
@@ -1017,272 +1040,164 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
 		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
-
-	/* Find out the appropriate era_action. */
-	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
-		era = dasd_era_fatal;
-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
-		 irb->scsw.cstat == 0 &&
-		 !irb->esw.esw0.erw.cons)
-		era = dasd_era_none;
-	else if (irb->esw.esw0.erw.cons)
-		era = device->discipline->examine_error(cqr, irb);
-	else
-		era = dasd_era_recover;
-
-	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
+	next = NULL;
 	expires = 0;
-	if (era == dasd_era_none) {
-		cqr->status = DASD_CQR_DONE;
+	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
+		/* request was completed successfully */
+		cqr->status = DASD_CQR_SUCCESS;
 		cqr->stopclk = now;
 		/* Start first request on queue if possible -> fast_io. */
-		if (cqr->list.next != &device->ccw_queue) {
-			next = list_entry(cqr->list.next,
-					  struct dasd_ccw_req, list);
-			if ((next->status == DASD_CQR_QUEUED) &&
-			    (!device->stopped)) {
-				if (device->discipline->start_IO(next) == 0)
-					expires = next->expires;
-				else
-					DEV_MESSAGE(KERN_DEBUG, device, "%s",
-						    "Interrupt fastpath "
-						    "failed!");
-			}
+		if (cqr->devlist.next != &device->ccw_queue) {
+			next = list_entry(cqr->devlist.next,
+					  struct dasd_ccw_req, devlist);
 		}
 	} else { /* error */
-		memcpy(&cqr->irb, irb, sizeof (struct irb));
+		memcpy(&cqr->irb, irb, sizeof(struct irb));
 		if (device->features & DASD_FEATURE_ERPLOG) {
-			/* dump sense data */
 			dasd_log_sense(cqr, irb);
 		}
-		switch (era) {
-		case dasd_era_fatal:
-			cqr->status = DASD_CQR_FAILED;
-			cqr->stopclk = now;
-			break;
-		case dasd_era_recover:
+		/* If we have no sense data, or we just don't want complex ERP
+		 * for this request, but if we have retries left, then just
+		 * reset this request and retry it in the fastpath
+		 */
+		if (!(cqr->irb.esw.esw0.erw.cons &&
+		      test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
+		    cqr->retries > 0) {
+			DEV_MESSAGE(KERN_DEBUG, device,
+				    "default ERP in fastpath (%i retries left)",
+				    cqr->retries);
+			cqr->lpm = LPM_ANYPATH;
+			cqr->status = DASD_CQR_QUEUED;
+			next = cqr;
+		} else
 			cqr->status = DASD_CQR_ERROR;
-			break;
-		default:
-			BUG();
-		}
+	}
+	if (next && (next->status == DASD_CQR_QUEUED) &&
+	    (!device->stopped)) {
+		if (device->discipline->start_IO(next) == 0)
+			expires = next->expires;
+		else
+			DEV_MESSAGE(KERN_DEBUG, device, "%s",
+				    "Interrupt fastpath "
+				    "failed!");
 	}
 	if (expires != 0)
-		dasd_set_timer(device, expires);
+		dasd_device_set_timer(device, expires);
 	else
-		dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
 }
 
 /*
- * posts the buffer_cache about a finalized request
+ * If we have an error on a dasd_block layer request then we cancel
+ * and return all further requests from the same dasd_block as well.
  */
-static inline void
-dasd_end_request(struct request *req, int uptodate)
+static void __dasd_device_recovery(struct dasd_device *device,
+				   struct dasd_ccw_req *ref_cqr)
 {
-	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
-		BUG();
-	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req, uptodate);
-}
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
 
-/*
- * Process finished error recovery ccw.
- */
-static inline void
-__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
-{
-	dasd_erp_fn_t erp_fn;
+	/*
+	 * only requeue request that came from the dasd_block layer
+	 */
+	if (!ref_cqr->block)
+		return;
 
-	if (cqr->status == DASD_CQR_DONE)
-		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
-	else
-		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
-	erp_fn = device->discipline->erp_postaction(cqr);
-	erp_fn(cqr);
-}
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		if (cqr->status == DASD_CQR_QUEUED &&
+		    ref_cqr->block == cqr->block) {
+			cqr->status = DASD_CQR_CLEARED;
+		}
+	}
+};
 
 /*
- * Process ccw request queue.
+ * Remove those ccw requests from the queue that need to be returned
+ * to the upper layer.
  */
-static void
-__dasd_process_ccw_queue(struct dasd_device * device,
-			 struct list_head *final_queue)
+static void __dasd_device_process_ccw_queue(struct dasd_device *device,
+					    struct list_head *final_queue)
 {
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
-	dasd_erp_fn_t erp_fn;
 
-restart:
 	/* Process request with final status. */
 	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+
 		/* Stop list processing at the first non-final request. */
-		if (cqr->status != DASD_CQR_DONE &&
-		    cqr->status != DASD_CQR_FAILED &&
-		    cqr->status != DASD_CQR_ERROR)
+		if (cqr->status == DASD_CQR_QUEUED ||
+		    cqr->status == DASD_CQR_IN_IO ||
+		    cqr->status == DASD_CQR_CLEAR_PENDING)
 			break;
-		/* Process requests with DASD_CQR_ERROR */
 		if (cqr->status == DASD_CQR_ERROR) {
-			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
-				cqr->status = DASD_CQR_FAILED;
-				cqr->stopclk = get_clock();
-			} else {
-				if (cqr->irb.esw.esw0.erw.cons &&
-				    test_bit(DASD_CQR_FLAGS_USE_ERP,
-					     &cqr->flags)) {
-					erp_fn = device->discipline->
-						erp_action(cqr);
-					erp_fn(cqr);
-				} else
-					dasd_default_erp_action(cqr);
-			}
-			goto restart;
-		}
-
-		/* First of all call extended error reporting. */
-		if (dasd_eer_enabled(device) &&
-		    cqr->status == DASD_CQR_FAILED) {
-			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
-
-			/* restart request */
-			cqr->status = DASD_CQR_QUEUED;
-			cqr->retries = 255;
-			device->stopped |= DASD_STOPPED_QUIESCE;
-			goto restart;
+			__dasd_device_recovery(device, cqr);
 		}
-
-		/* Process finished ERP request. */
-		if (cqr->refers) {
-			__dasd_process_erp(device, cqr);
-			goto restart;
-		}
-
 		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
-		list_move_tail(&cqr->list, final_queue);
+		list_move_tail(&cqr->devlist, final_queue);
 	}
 }
 
-static void
-dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
-{
-	struct request *req;
-	struct dasd_device *device;
-	int status;
-
-	req = (struct request *) data;
-	device = cqr->device;
-	dasd_profile_end(device, cqr, req);
-	status = cqr->device->discipline->free_cp(cqr,req);
-	spin_lock_irq(&device->request_queue_lock);
-	dasd_end_request(req, status);
-	spin_unlock_irq(&device->request_queue_lock);
-}
-
-
 /*
- * Fetch requests from the block device queue.
+ * the cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function
  */
-static void
-__dasd_process_blk_queue(struct dasd_device * device)
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+					      struct list_head *final_queue)
 {
-	struct request_queue *queue;
-	struct request *req;
+	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
-	int nr_queued;
-
-	queue = device->request_queue;
-	/* No queue ? Then there is nothing to do. */
-	if (queue == NULL)
-		return;
-
-	/*
-	 * We requeue request from the block device queue to the ccw
-	 * queue only in two states. In state DASD_STATE_READY the
-	 * partition detection is done and we need to requeue requests
-	 * for that. State DASD_STATE_ONLINE is normal block device
-	 * operation.
-	 */
-	if (device->state != DASD_STATE_READY &&
-	    device->state != DASD_STATE_ONLINE)
-		return;
-	nr_queued = 0;
-	/* Now we try to fetch requests from the request queue */
-	list_for_each_entry(cqr, &device->ccw_queue, list)
-		if (cqr->status == DASD_CQR_QUEUED)
-			nr_queued++;
-	while (!blk_queue_plugged(queue) &&
-	       elv_next_request(queue) &&
-		nr_queued < DASD_CHANQ_MAX_SIZE) {
-		req = elv_next_request(queue);
 
-		if (device->features & DASD_FEATURE_READONLY &&
-		    rq_data_dir(req) == WRITE) {
-			DBF_DEV_EVENT(DBF_ERR, device,
-				      "Rejecting write request %p",
-				      req);
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
-		}
-		if (device->stopped & DASD_STOPPED_DC_EIO) {
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
-		}
-		cqr = device->discipline->build_cp(device, req);
-		if (IS_ERR(cqr)) {
-			if (PTR_ERR(cqr) == -ENOMEM)
-				break;	/* terminate request queue loop */
-			if (PTR_ERR(cqr) == -EAGAIN) {
-				/*
-				 * The current request cannot be build right
-				 * now, we have to try later. If this request
-				 * is the head-of-queue we stop the device
-				 * for 1/2 second.
-				 */
-				if (!list_empty(&device->ccw_queue))
-					break;
-				device->stopped |= DASD_STOPPED_PENDING;
-				dasd_set_timer(device, HZ/2);
-				break;
-			}
-			DBF_DEV_EVENT(DBF_ERR, device,
-				      "CCW creation failed (rc=%ld) "
-				      "on request %p",
-				      PTR_ERR(cqr), req);
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
+	list_for_each_safe(l, n, final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		list_del_init(&cqr->devlist);
+		if (cqr->block)
+			spin_lock_bh(&cqr->block->queue_lock);
+		switch (cqr->status) {
+		case DASD_CQR_SUCCESS:
+			cqr->status = DASD_CQR_DONE;
+			break;
+		case DASD_CQR_ERROR:
+			cqr->status = DASD_CQR_NEED_ERP;
+			break;
+		case DASD_CQR_CLEARED:
+			cqr->status = DASD_CQR_TERMINATED;
+			break;
+		default:
+			DEV_MESSAGE(KERN_ERR, device,
+				    "wrong cqr status in __dasd_process_final_queue "
+				    "for cqr %p, status %x",
+				    cqr, cqr->status);
+			BUG();
 		}
-		cqr->callback = dasd_end_request_cb;
-		cqr->callback_data = (void *) req;
-		cqr->status = DASD_CQR_QUEUED;
-		blkdev_dequeue_request(req);
-		list_add_tail(&cqr->list, &device->ccw_queue);
-		dasd_profile_start(device, cqr, req);
-		nr_queued++;
+		if (cqr->block)
+			spin_unlock_bh(&cqr->block->queue_lock);
+		if (cqr->callback != NULL)
+			(cqr->callback)(cqr, cqr->callback_data);
 	}
 }
 
+
+
 /*
  * Take a look at the first request on the ccw queue and check
  * if it reached its expire time. If so, terminate the IO.
  */
-static void
-__dasd_check_expire(struct dasd_device * device)
+static void __dasd_device_check_expire(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 
 	if (list_empty(&device->ccw_queue))
 		return;
-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
 		if (device->discipline->term_IO(cqr) != 0) {
 			/* Hmpf, try again in 5 sec */
-			dasd_set_timer(device, 5*HZ);
+			dasd_device_set_timer(device, 5*HZ);
 			DEV_MESSAGE(KERN_ERR, device,
 				    "internal error - timeout (%is) expired "
 				    "for cqr %p, termination failed, "
@@ -1301,77 +1216,53 @@ __dasd_check_expire(struct dasd_device * device)
  * Take a look at the first request on the ccw queue and check
  * if it needs to be started.
  */
-static void
-__dasd_start_head(struct dasd_device * device)
+static void __dasd_device_start_head(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 	int rc;
 
 	if (list_empty(&device->ccw_queue))
 		return;
-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* Non-temporary stop condition will trigger fail fast */
-	if (device->stopped & ~DASD_STOPPED_PENDING &&
-	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
-	    (!dasd_eer_enabled(device))) {
-		cqr->status = DASD_CQR_FAILED;
-		dasd_schedule_bh(device);
+	/* when device is stopped, return request to previous layer */
+	if (device->stopped) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_schedule_device_bh(device);
 		return;
 	}
-	/* Don't try to start requests if device is stopped */
-	if (device->stopped)
-		return;
 
 	rc = device->discipline->start_IO(cqr);
 	if (rc == 0)
-		dasd_set_timer(device, cqr->expires);
+		dasd_device_set_timer(device, cqr->expires);
 	else if (rc == -EACCES) {
-		dasd_schedule_bh(device);
+		dasd_schedule_device_bh(device);
 	} else
 		/* Hmpf, try again in 1/2 sec */
-		dasd_set_timer(device, 50);
-}
-
-static inline int
-_wait_for_clear(struct dasd_ccw_req *cqr)
-{
-	return (cqr->status == DASD_CQR_QUEUED);
+		dasd_device_set_timer(device, 50);
 }
 
 /*
- * Remove all requests from the ccw queue (all = '1') or only block device
- * requests in case all = '0'.
- * Take care of the erp-chain (chained via cqr->refers) and remove either
- * the whole erp-chain or none of the erp-requests.
- * If a request is currently running, term_IO is called and the request
- * is re-queued. Prior to removing the terminated request we need to wait
- * for the clear-interrupt.
- * In case termination is not possible we stop processing and just finishing
- * the already moved requests.
+ * Go through all request on the dasd_device request queue,
+ * terminate them on the cdev if necessary, and return them to the
+ * submitting layer via callback.
+ * Note:
+ * Make sure that all 'submitting layers' still exist when
+ * this function is called!. In other words, when 'device' is a base
+ * device then all block layer requests must have been removed before
+ * via dasd_flush_block_queue.
  */
-static int
-dasd_flush_ccw_queue(struct dasd_device * device, int all)
+int dasd_flush_device_queue(struct dasd_device *device)
 {
-	struct dasd_ccw_req *cqr, *orig, *n;
-	int rc, i;
-
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
 	struct list_head flush_queue;
 
 	INIT_LIST_HEAD(&flush_queue);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = 0;
-restart:
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
-		/* get original request of erp request-chain */
-		for (orig = cqr; orig->refers != NULL; orig = orig->refers);
-
-		/* Flush all request or only block device requests? */
-		if (all == 0 && cqr->callback != dasd_end_request_cb &&
-		    orig->callback != dasd_end_request_cb) {
-			continue;
-		}
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
 		/* Check status and move request to flush_queue */
 		switch (cqr->status) {
 		case DASD_CQR_IN_IO:
@@ -1387,90 +1278,60 @@ restart:
 			}
 			break;
 		case DASD_CQR_QUEUED:
-		case DASD_CQR_ERROR:
-			/* set request to FAILED */
 			cqr->stopclk = get_clock();
-			cqr->status = DASD_CQR_FAILED;
+			cqr->status = DASD_CQR_CLEARED;
 			break;
-		default: /* do not touch the others */
+		default: /* no need to modify the others */
 			break;
 		}
-		/* Rechain request (including erp chain) */
-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
-			cqr->endclk = get_clock();
-			list_move_tail(&cqr->list, &flush_queue);
-		}
-		if (i > 1)
-			/* moved more than one request - need to restart */
-			goto restart;
+		list_move_tail(&cqr->devlist, &flush_queue);
 	}
-
 finished:
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	/* Now call the callback function of flushed requests */
-restart_cb:
-	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
-		if (cqr->status == DASD_CQR_CLEAR) {
-			/* wait for clear interrupt! */
-			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
-			cqr->status = DASD_CQR_FAILED;
-		}
-		/* Process finished ERP request. */
-		if (cqr->refers) {
-			__dasd_process_erp(device, cqr);
-			/* restart list_for_xx loop since dasd_process_erp
-			 * might remove multiple elements */
-			goto restart_cb;
-		}
-		/* call the callback function */
-		cqr->endclk = get_clock();
-		if (cqr->callback != NULL)
-			(cqr->callback)(cqr, cqr->callback_data);
-	}
+	/*
+	 * After this point all requests must be in state CLEAR_PENDING,
+	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
+	 * one of the others.
+	 */
+	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
+		wait_event(dasd_flush_wq,
+			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+	/*
+	 * Now set each request back to TERMINATED, DONE or NEED_ERP
+	 * and call the callback function of flushed requests
+	 */
+	__dasd_device_process_final_queue(device, &flush_queue);
 	return rc;
 }
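For readers tracing the new flush logic: requests are detached from the live queue while the ccw device lock is held, and the function only sleeps once the lock is released. A minimal sketch of that collect-then-wait shape, assuming a request whose status is finalized by an interrupt handler that wakes the wait queue; all my_* names are hypothetical, only the list/wait/spinlock calls are real kernel APIs:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#define MY_CLEAR_PENDING 1	/* stand-in for DASD_CQR_CLEAR_PENDING */

struct my_req {
	struct list_head list;
	int status;		/* finalized by the interrupt handler */
};

static void my_flush(spinlock_t *lock, struct list_head *queue,
		     wait_queue_head_t *wq)
{
	struct my_req *req, *n;
	LIST_HEAD(flush_queue);

	/* Phase 1: detach all requests while holding the device lock. */
	spin_lock_irq(lock);
	list_for_each_entry_safe(req, n, queue, list)
		list_move_tail(&req->list, &flush_queue);
	spin_unlock_irq(lock);

	/* Phase 2: sleep with the lock released; the interrupt handler
	 * sets the final status and wakes wq (cf. dasd_flush_wq above). */
	list_for_each_entry_safe(req, n, &flush_queue, list)
		wait_event(*wq, req->status != MY_CLEAR_PENDING);
}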
 
 /*
  * Acquire the device lock and process queues for the device.
  */
-static void
-dasd_tasklet(struct dasd_device * device)
+static void dasd_device_tasklet(struct dasd_device *device)
 {
 	struct list_head final_queue;
-	struct list_head *l, *n;
-	struct dasd_ccw_req *cqr;
 
 	atomic_set (&device->tasklet_scheduled, 0);
 	INIT_LIST_HEAD(&final_queue);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Check expire time of first request on the ccw queue. */
-	__dasd_check_expire(device);
-	/* Finish off requests on ccw queue */
-	__dasd_process_ccw_queue(device, &final_queue);
+	__dasd_device_check_expire(device);
+	/* find final requests on ccw queue */
+	__dasd_device_process_ccw_queue(device, &final_queue);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	/* Now call the callback function of requests with final status */
-	list_for_each_safe(l, n, &final_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
-		list_del_init(&cqr->list);
-		if (cqr->callback != NULL)
-			(cqr->callback)(cqr, cqr->callback_data);
-	}
-	spin_lock_irq(&device->request_queue_lock);
-	spin_lock(get_ccwdev_lock(device->cdev));
-	/* Get new request from the block device request queue */
-	__dasd_process_blk_queue(device);
+	__dasd_device_process_final_queue(device, &final_queue);
+	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Now check if the head of the ccw queue needs to be started. */
-	__dasd_start_head(device);
-	spin_unlock(get_ccwdev_lock(device->cdev));
-	spin_unlock_irq(&device->request_queue_lock);
+	__dasd_device_start_head(device);
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	dasd_put_device(device);
 }
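The tasklet above follows a standard bottom-half shape: re-arm first so a schedule request arriving during processing is not lost, gather finished work under the lock, then run callbacks with the lock dropped. A condensed sketch under those assumptions; the my_* names are hypothetical, the tasklet/atomic/list calls are the real APIs:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_dev {
	atomic_t tasklet_scheduled;
	spinlock_t lock;
	struct list_head done;		/* requests with final status */
	struct tasklet_struct tasklet;
};

static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *) data;
	LIST_HEAD(final);

	/* re-arm first: a schedule during processing is not lost */
	atomic_set(&dev->tasklet_scheduled, 0);
	spin_lock_irq(&dev->lock);
	list_splice_init(&dev->done, &final);	/* detach finished work */
	spin_unlock_irq(&dev->lock);
	/* run the callbacks of everything on 'final' here, unlocked */
}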
 
 /*
  * Schedules a call to dasd_tasklet over the device tasklet.
  */
-void
-dasd_schedule_bh(struct dasd_device * device)
+void dasd_schedule_device_bh(struct dasd_device *device)
 {
 	/* Protect against rescheduling. */
 	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
@@ -1480,160 +1341,109 @@ dasd_schedule_bh(struct dasd_device * device)
 }
 
 /*
- * Queue a request to the head of the ccw_queue. Start the I/O if
- * possible.
+ * Queue a request to the head of the device ccw_queue.
+ * Start the I/O if possible.
  */
-void
-dasd_add_request_head(struct dasd_ccw_req *req)
+void dasd_add_request_head(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	unsigned long flags;
 
-	device = req->device;
+	device = cqr->startdev;
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	req->status = DASD_CQR_QUEUED;
-	req->device = device;
-	list_add(&req->list, &device->ccw_queue);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add(&cqr->devlist, &device->ccw_queue);
 	/* let the bh start the request to keep them in order */
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 }
 
 /*
- * Queue a request to the tail of the ccw_queue. Start the I/O if
- * possible.
+ * Queue a request to the tail of the device ccw_queue.
+ * Start the I/O if possible.
  */
-void
-dasd_add_request_tail(struct dasd_ccw_req *req)
+void dasd_add_request_tail(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	unsigned long flags;
 
-	device = req->device;
+	device = cqr->startdev;
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	req->status = DASD_CQR_QUEUED;
-	req->device = device;
-	list_add_tail(&req->list, &device->ccw_queue);
+	cqr->status = DASD_CQR_QUEUED;
+	list_add_tail(&cqr->devlist, &device->ccw_queue);
 	/* let the bh start the request to keep them in order */
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 }
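A hypothetical caller of this pair, assuming the discipline has already built the channel program and set cqr->startdev; only the two dasd_add_request_* functions are real, my_submit is illustrative:

static void my_submit(struct dasd_ccw_req *cqr, int urgent)
{
	if (urgent)
		dasd_add_request_head(cqr);	/* started next */
	else
		dasd_add_request_tail(cqr);	/* normal FIFO order */
}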
 
 /*
- * Wakeup callback.
+ * Wakeup helper for the 'sleep_on' functions.
  */
-static void
-dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
+static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
 {
 	wake_up((wait_queue_head_t *) data);
 }
 
-static inline int
-_wait_for_wakeup(struct dasd_ccw_req *cqr)
+static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	int rc;
 
-	device = cqr->device;
+	device = cqr->startdev;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = ((cqr->status == DASD_CQR_DONE ||
-	       cqr->status == DASD_CQR_FAILED) &&
-	      list_empty(&cqr->list));
+	       cqr->status == DASD_CQR_NEED_ERP ||
+	       cqr->status == DASD_CQR_TERMINATED) &&
+	      list_empty(&cqr->devlist));
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	return rc;
 }
 
 /*
- * Attempts to start a special ccw queue and waits for its completion.
+ * Queue a request to the tail of the device ccw_queue and wait for
+ * its completion.
  */
-int
-dasd_sleep_on(struct dasd_ccw_req * cqr)
+int dasd_sleep_on(struct dasd_ccw_req *cqr)
 {
 	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
-	device = cqr->device;
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	device = cqr->startdev;
 
 	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = (void *) &wait_q;
-	cqr->status = DASD_CQR_QUEUED;
-	list_add_tail(&cqr->list, &device->ccw_queue);
-
-	/* let the bh start the request to keep them in order */
-	dasd_schedule_bh(device);
-
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
+	dasd_add_request_tail(cqr);
 	wait_event(wait_q, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
-	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
+	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
 }
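Note why _wait_for_wakeup() also checks list_empty(&cqr->devlist): wait_q lives on the caller's stack, so the request must be fully off the device queue before this frame can safely return. The same synchronous shape can be written with <linux/completion.h>; a hedged alternative sketch (not what dasd uses, my_* names are hypothetical):

#include <linux/completion.h>

struct my_ctx {
	struct completion done;
};

static void my_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	complete(&((struct my_ctx *) data)->done);
}

static int my_sleep_on(struct dasd_ccw_req *cqr)
{
	struct my_ctx ctx;

	init_completion(&ctx.done);
	cqr->callback = my_wakeup_cb;
	cqr->callback_data = &ctx;
	dasd_add_request_tail(cqr);
	wait_for_completion(&ctx.done);	/* callback runs exactly once */
	return (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
}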
 
 /*
- * Attempts to start a special ccw queue and wait interruptible
- * for its completion.
+ * Queue a request to the tail of the device ccw_queue and wait
+ * interruptible for its completion.
  */
-int
-dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
+int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
 	wait_queue_head_t wait_q;
 	struct dasd_device *device;
-	int rc, finished;
-
-	device = cqr->device;
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
+	int rc;
 
+	device = cqr->startdev;
 	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = (void *) &wait_q;
-	cqr->status = DASD_CQR_QUEUED;
-	list_add_tail(&cqr->list, &device->ccw_queue);
-
-	/* let the bh start the request to keep them in order */
-	dasd_schedule_bh(device);
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-	finished = 0;
-	while (!finished) {
-		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
-		if (rc != -ERESTARTSYS) {
-			/* Request is final (done or failed) */
-			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
-			break;
-		}
-		spin_lock_irq(get_ccwdev_lock(device->cdev));
-		switch (cqr->status) {
-		case DASD_CQR_IN_IO:
-			/* terminate runnig cqr */
-			if (device->discipline->term_IO) {
-				cqr->retries = -1;
-				device->discipline->term_IO(cqr);
-				/* wait (non-interruptible) for final status
-				 * because signal ist still pending */
-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
-				wait_event(wait_q, _wait_for_wakeup(cqr));
-				spin_lock_irq(get_ccwdev_lock(device->cdev));
-				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
-				finished = 1;
-			}
-			break;
-		case DASD_CQR_QUEUED:
-			/* request */
-			list_del_init(&cqr->list);
-			rc = -EIO;
-			finished = 1;
-			break;
-		default:
-			/* cqr with 'non-interruptable' status - just wait */
-			break;
-		}
-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	dasd_add_request_tail(cqr);
+	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+	if (rc == -ERESTARTSYS) {
+		dasd_cancel_req(cqr);
+		/* wait (non-interruptible) for final status */
+		wait_event(wait_q, _wait_for_wakeup(cqr));
 	}
+	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
 }
 
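The new version is the general shape for an interruptible wait that still needs cleanup: if a signal aborts the wait, the operation is canceled and the code then waits non-interruptibly, because the request keeps referencing shared state (here the on-stack wait queue) until its callback has run. Condensed, with hypothetical my_* helpers:

rc = wait_event_interruptible(wq, my_is_final(req));
if (rc == -ERESTARTSYS) {
	my_cancel(req);			/* asynchronous, like dasd_cancel_req */
	wait_event(wq, my_is_final(req));	/* this wait cannot be skipped */
}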
@@ -1643,25 +1453,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
  * and be put back to status queued, before the special request is added
  * to the head of the queue. Then the special request is waited on normally.
  */
-static inline int
-_dasd_term_running_cqr(struct dasd_device *device)
+static inline int _dasd_term_running_cqr(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 
 	if (list_empty(&device->ccw_queue))
 		return 0;
-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	return device->discipline->term_IO(cqr);
 }
 
-int
-dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
+int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 {
 	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
-	device = cqr->device;
+	device = cqr->startdev;
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = _dasd_term_running_cqr(device);
 	if (rc) {
@@ -1673,17 +1481,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
 	cqr->callback = dasd_wakeup_cb;
 	cqr->callback_data = (void *) &wait_q;
 	cqr->status = DASD_CQR_QUEUED;
-	list_add(&cqr->list, &device->ccw_queue);
+	list_add(&cqr->devlist, &device->ccw_queue);
 
 	/* let the bh start the request to keep them in order */
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
 	wait_event(wait_q, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
-	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
+	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
 }
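The 'immediate' variant depends on two steps happening under a single lock hold: terminate whatever is running, then insert the new request at the head so it is started next. A hedged sketch of that shape (error path elided; _dasd_term_running_cqr re-queues the preempted request as the comment above describes):

spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = _dasd_term_running_cqr(device);	/* preempted cqr goes back to QUEUED */
if (rc == 0) {
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);	/* head of queue */
	dasd_schedule_device_bh(device);
}
spin_unlock_irq(get_ccwdev_lock(device->cdev));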
@@ -1692,11 +1500,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
  * This is useful to timeout requests. The request will be
  * terminated if it is currently in i/o.
  * Returns 1 if the request has been terminated.
+ *	   0 if there was no need to terminate the request (not started yet)
+ *	   negative error code if termination failed
+ * Cancellation of a request is an asynchronous operation! The calling
+ * function has to wait until the request is properly returned via callback.
  */
-int
-dasd_cancel_req(struct dasd_ccw_req *cqr)
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
-	struct dasd_device *device = cqr->device;
+	struct dasd_device *device = cqr->startdev;
 	unsigned long flags;
 	int rc;
 
@@ -1704,74 +1515,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 	switch (cqr->status) {
 	case DASD_CQR_QUEUED:
-		/* request was not started - just set to failed */
-		cqr->status = DASD_CQR_FAILED;
+		/* request was not started - just set to cleared */
+		cqr->status = DASD_CQR_CLEARED;
 		break;
 	case DASD_CQR_IN_IO:
 		/* request in IO - terminate IO and release again */
-		if (device->discipline->term_IO(cqr) != 0)
-			/* what to do if unable to terminate ??????
-			   e.g. not _IN_IO */
-			cqr->status = DASD_CQR_FAILED;
-		cqr->stopclk = get_clock();
-		rc = 1;
+		rc = device->discipline->term_IO(cqr);
+		if (rc) {
+			DEV_MESSAGE(KERN_ERR, device,
+				    "dasd_cancel_req is unable "
+				    " to terminate request %p, rc = %d",
+				    cqr, rc);
+		} else {
+			cqr->stopclk = get_clock();
+			rc = 1;
+		}
 		break;
-	case DASD_CQR_DONE:
-	case DASD_CQR_FAILED:
-		/* already finished - do nothing */
+	default: /* already finished or clear pending - do nothing */
 		break;
-	default:
-		DEV_MESSAGE(KERN_ALERT, device,
-			    "invalid status %02x in request",
-			    cqr->status);
+	}
+	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+	dasd_schedule_device_bh(device);
+	return rc;
+}
+
+
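Because cancellation is asynchronous (see the function comment above), a hypothetical timeout path would pair dasd_cancel_req() with a wait for the request's callback; my_wq and my_cqr_returned are placeholders:

rc = dasd_cancel_req(cqr);
if (rc >= 0)
	wait_event(my_wq, my_cqr_returned(cqr));	/* callback has run */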
+/*
+ * SECTION: Operations of the dasd_block layer.
+ */
+
+/*
+ * Timeout function for dasd_block. This is used when the block layer
+ * is waiting for something that may not come reliably, (e.g. a state
+ * change interrupt)
+ */
+static void dasd_block_timeout(unsigned long ptr)
+{
+	unsigned long flags;
+	struct dasd_block *block;
+
+	block = (struct dasd_block *) ptr;
+	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
+	/* re-activate request queue */
+	block->base->stopped &= ~DASD_STOPPED_PENDING;
+	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
+	dasd_schedule_block_bh(block);
+}
+
+/*
+ * Setup timeout for a dasd_block in jiffies.
+ */
+void dasd_block_set_timer(struct dasd_block *block, int expires)
+{
+	if (expires == 0) {
+		if (timer_pending(&block->timer))
+			del_timer(&block->timer);
+		return;
+	}
+	if (timer_pending(&block->timer)) {
+		if (mod_timer(&block->timer, jiffies + expires))
+			return;
+	}
+	block->timer.function = dasd_block_timeout;
+	block->timer.data = (unsigned long) block;
+	block->timer.expires = jiffies + expires;
+	add_timer(&block->timer);
+}
+
+/*
+ * Clear timeout for a dasd_block.
+ */
+void dasd_block_clear_timer(struct dasd_block *block)
+{
+	if (timer_pending(&block->timer))
+		del_timer(&block->timer);
+}
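The set/clear pair wraps the classic 2.6-era timer idiom. Since mod_timer() arms an idle timer just as well as it extends a pending one, and del_timer() on an idle timer is harmless, a condensed equivalent can be written as below; my_* names are hypothetical:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_timeout(unsigned long data)
{
	/* softirq context; e.g. clear a STOPPED bit and reschedule the bh */
}

static DEFINE_TIMER(my_timer, my_timeout, 0, 0);	/* 2.6-era initializer */

static void my_set_timer(int expires)
{
	if (expires == 0) {		/* 0 means: cancel any pending timeout */
		del_timer(&my_timer);
		return;
	}
	mod_timer(&my_timer, jiffies + expires);	/* arms or extends */
}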
+
+/*
+ * posts the buffer_cache about a finalized request
+ */
+static inline void dasd_end_request(struct request *req, int uptodate)
+{
+	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
 		BUG();
+	add_disk_randomness(req->rq_disk);
+	end_that_request_last(req, uptodate);
+}
 
+/*
+ * Process finished error recovery ccw.
+ */
+static inline void __dasd_block_process_erp(struct dasd_block *block,
+					    struct dasd_ccw_req *cqr)
+{
+	dasd_erp_fn_t erp_fn;
+	struct dasd_device *device = block->base;
+
+	if (cqr->status == DASD_CQR_DONE)
+		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
+	else
+		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
+	erp_fn = device->discipline->erp_postaction(cqr);
+	erp_fn(cqr);
+}
+
+/*
+ * Fetch requests from the block device queue.
+ */
+static void __dasd_process_request_queue(struct dasd_block *block)
+{
+	struct request_queue *queue;
+	struct request *req;
+	struct dasd_ccw_req *cqr;
+	struct dasd_device *basedev;
+	unsigned long flags;
+	queue = block->request_queue;
+	basedev = block->base;
+	/* No queue ? Then there is nothing to do. */
+	if (queue == NULL)
+		return;
+
+	/*
+	 * We requeue request from the block device queue to the ccw
+	 * queue only in two states. In state DASD_STATE_READY the
+	 * partition detection is done and we need to requeue requests
+	 * for that. State DASD_STATE_ONLINE is normal block device
+	 * operation.
+	 */
+	if (basedev->state < DASD_STATE_READY)
+		return;
+	/* Now we try to fetch requests from the request queue */
+	while (!blk_queue_plugged(queue) &&
+	       elv_next_request(queue)) {
+
+		req = elv_next_request(queue);
+
+		if (basedev->features & DASD_FEATURE_READONLY &&
+		    rq_data_dir(req) == WRITE) {
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "Rejecting write request %p",
+				      req);
+			blkdev_dequeue_request(req);
+			dasd_end_request(req, 0);
+			continue;
+		}
+		cqr = basedev->discipline->build_cp(basedev, block, req);
+		if (IS_ERR(cqr)) {
+			if (PTR_ERR(cqr) == -EBUSY)
+				break;	/* normal end condition */
+			if (PTR_ERR(cqr) == -ENOMEM)
+				break;	/* terminate request queue loop */
+			if (PTR_ERR(cqr) == -EAGAIN) {
+				/*
+				 * The current request cannot be built right
+				 * now, we have to try later. If this request
+				 * is the head-of-queue we stop the device
+				 * for 1/2 second.
+				 */
+				if (!list_empty(&block->ccw_queue))
+					break;
+				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
+				basedev->stopped |= DASD_STOPPED_PENDING;
+				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
+				dasd_block_set_timer(block, HZ/2);
+				break;
+			}
+			DBF_DEV_EVENT(DBF_ERR, basedev,
+				      "CCW creation failed (rc=%ld) "
+				      "on request %p",
+				      PTR_ERR(cqr), req);
+			blkdev_dequeue_request(req);
+			dasd_end_request(req, 0);
+			continue;
+		}
+		/*
+		 * Note: callback is set to dasd_return_cqr_cb in
+		 * __dasd_block_start_head to cover erp requests as well
+		 */
+		cqr->callback_data = (void *) req;
+		cqr->status = DASD_CQR_FILLED;
+		blkdev_dequeue_request(req);
+		list_add_tail(&cqr->blocklist, &block->ccw_queue);
+		dasd_profile_start(block, cqr, req);
+	}
+}
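build_cp() returns either a valid cqr or an ERR_PTR-encoded errno, and the loop above dispatches on that code. The same idiom in isolation, with a hypothetical my_build factory standing in for the discipline callback:

#include <linux/err.h>

struct dasd_ccw_req *my_build(struct request *req);	/* may ERR_PTR() */

static void my_fetch_one(struct request *req)
{
	struct dasd_ccw_req *cqr = my_build(req);

	if (!IS_ERR(cqr)) {
		/* success: queue cqr for the device */
		return;
	}
	switch (PTR_ERR(cqr)) {
	case -EBUSY:		/* device queue full: stop fetching for now */
	case -ENOMEM:		/* no memory for a channel program: stop */
		return;
	case -EAGAIN:		/* transient: leave req queued, retry later */
		return;
	default:		/* hard error: fail the request */
		break;
	}
}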
+
+static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
+{
+	struct request *req;
+	int status;
+
+	req = (struct request *) cqr->callback_data;
+	dasd_profile_end(cqr->block, cqr, req);
+	status = cqr->memdev->discipline->free_cp(cqr, req);
+	dasd_end_request(req, status);
+}
+
+/*
+ * Process ccw request queue.
+ */
+static void __dasd_process_block_ccw_queue(struct dasd_block *block,
+					   struct list_head *final_queue)
+{
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+	dasd_erp_fn_t erp_fn;
+	unsigned long flags;
+	struct dasd_device *base = block->base;
+
+restart:
+	/* Process request with final status. */
+	list_for_each_safe(l, n, &block->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		if (cqr->status != DASD_CQR_DONE &&
+		    cqr->status != DASD_CQR_FAILED &&
+		    cqr->status != DASD_CQR_NEED_ERP &&
+		    cqr->status != DASD_CQR_TERMINATED)
+			continue;
+
+		if (cqr->status == DASD_CQR_TERMINATED) {
+			base->discipline->handle_terminated_request(cqr);
+			goto restart;
+		}
+
+		/* Process requests that may be recovered */
+		if (cqr->status == DASD_CQR_NEED_ERP) {
+			if (cqr->irb.esw.esw0.erw.cons &&
+			    test_bit(DASD_CQR_FLAGS_USE_ERP,
+				     &cqr->flags)) {
+				erp_fn = base->discipline->erp_action(cqr);
+				erp_fn(cqr);
+			}
+			goto restart;
+		}
+
+		/* First of all call extended error reporting. */
+		if (dasd_eer_enabled(base) &&
+		    cqr->status == DASD_CQR_FAILED) {
+			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
+
+			/* restart request */
+			cqr->status = DASD_CQR_FILLED;
+			cqr->retries = 255;
+			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
+			base->stopped |= DASD_STOPPED_QUIESCE;
+			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
+					       flags);
+			goto restart;
+		}
+
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_block_process_erp(block, cqr);
+			goto restart;
+		}
+
+		/* Rechain finished requests to final queue */
+		cqr->endclk = get_clock();
+		list_move_tail(&cqr->blocklist, final_queue);
+	}
+}
+
+static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	dasd_schedule_block_bh(cqr->block);
+}
+
+static void __dasd_block_start_head(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr;
+
+	if (list_empty(&block->ccw_queue))
+		return;
+	/* We always begin with the first requests on the queue, as some
+	 * of previously started requests have to be enqueued on a
+	 * dasd_device again for error recovery.
+	 */
+	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
+		if (cqr->status != DASD_CQR_FILLED)
+			continue;
+		/* Non-temporary stop condition will trigger fail fast */
+		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
+		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
+		    (!dasd_eer_enabled(block->base))) {
+			cqr->status = DASD_CQR_FAILED;
+			dasd_schedule_block_bh(block);
+			continue;
+		}
+		/* Don't try to start requests if device is stopped */
+		if (block->base->stopped)
+			return;
+
+		/* just a fail safe check, should not happen */
+		if (!cqr->startdev)
+			cqr->startdev = block->base;
+
+		/* make sure that the requests we submit find their way back */
+		cqr->callback = dasd_return_cqr_cb;
+
+		dasd_add_request_tail(cqr);
+	}
+}
+
+/*
+ * Central dasd_block layer routine. Takes requests from the generic
+ * block layer request queue, creates ccw requests, enqueues them on
+ * a dasd_device and processes ccw requests that have been returned.
+ */
+static void dasd_block_tasklet(struct dasd_block *block)
+{
+	struct list_head final_queue;
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
+
+	atomic_set(&block->tasklet_scheduled, 0);
+	INIT_LIST_HEAD(&final_queue);
+	spin_lock(&block->queue_lock);
+	/* Finish off requests on ccw queue */
+	__dasd_process_block_ccw_queue(block, &final_queue);
+	spin_unlock(&block->queue_lock);
+	/* Now call the callback function of requests with final status */
+	spin_lock_irq(&block->request_queue_lock);
+	list_for_each_safe(l, n, &final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
+	}
+	spin_lock(&block->queue_lock);
+	/* Get new request from the block device request queue */
+	__dasd_process_request_queue(block);
+	/* Now check if the head of the ccw queue needs to be started. */
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
+	spin_unlock_irq(&block->request_queue_lock);
+	dasd_put_device(block->base);
+}
+
+static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
+{
+	wake_up(&dasd_flush_wq);
+}
+
+/*
+ * Go through all requests on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	int rc, i;
+	struct list_head flush_queue;
+
+	INIT_LIST_HEAD(&flush_queue);
+	spin_lock_bh(&block->queue_lock);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
+		/* if this request currently owned by a dasd_device cancel it */
+		if (cqr->status >= DASD_CQR_QUEUED)
+			rc = dasd_cancel_req(cqr);
+		if (rc < 0)
+			break;
+		/* Rechain request (including erp chain) so it won't be
+		 * touched by the dasd_block_tasklet anymore.
+		 * Replace the callback so we notice when the request
+		 * is returned from the dasd_device layer.
+		 */
+		cqr->callback = _dasd_wake_block_flush_cb;
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, &flush_queue);
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+	spin_unlock_bh(&block->queue_lock);
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			__dasd_block_process_erp(block, cqr);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements */
+			goto restart_cb;
+		}
+		/* call the callback function */
+		cqr->endclk = get_clock();
+		list_del_init(&cqr->blocklist);
+		__dasd_cleanup_cqr(cqr);
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	dasd_schedule_bh(device);
 	return rc;
 }
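The wait condition "cqr->status < DASD_CQR_QUEUED" leans on the numeric ordering of the status enum: every state a returned request can be in compares below DASD_CQR_QUEUED, while device-owned states compare at or above it. Schematically (illustrative values only, not the real definitions from dasd_int.h):

enum my_cqr_status {
	MY_CQR_FILLED,		/* block-layer states ... */
	MY_CQR_DONE,
	MY_CQR_NEED_ERP,
	MY_CQR_TERMINATED,
	MY_CQR_QUEUED,		/* ... device-owned states from here up */
	MY_CQR_IN_IO,
	MY_CQR_CLEAR_PENDING,
};
/* so "status < MY_CQR_QUEUED" reads "no longer owned by a dasd_device" */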
 
 /*
- * SECTION: Block device operations (request queue, partitions, open, release).
+ * Schedules a call to dasd_block_tasklet over the block tasklet.
+ */
+void dasd_schedule_block_bh(struct dasd_block *block)
+{
+	/* Protect against rescheduling. */
+	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
+		return;
+	/* life cycle of block is bound to its base device */
+	dasd_get_device(block->base);
+	tasklet_hi_schedule(&block->tasklet);
+}
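dasd_get_device() here pairs with the dasd_put_device() at the end of dasd_block_tasklet(), so the base device cannot disappear while the bottom half is pending. The generic shape of that reference-across-deferred-work idiom, with hypothetical my_* names:

struct my_obj {
	atomic_t scheduled;
	struct tasklet_struct tasklet;
};

static void my_get(struct my_obj *obj);	/* hypothetical ref helpers */
static void my_put(struct my_obj *obj);	/* called by the handler itself */

static void my_schedule(struct my_obj *obj)
{
	if (atomic_cmpxchg(&obj->scheduled, 0, 1) != 0)
		return;		/* already pending; that run holds the ref */
	my_get(obj);		/* paired with my_put() in the handler */
	tasklet_hi_schedule(&obj->tasklet);
}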
+
+
+/*
+ * SECTION: external block device operations
+ * (request queue handling, open, release, etc.)
  */
 
 /*
  * Dasd request queue function. Called from ll_rw_blk.c
  */
-static void
-do_dasd_request(struct request_queue * queue)
+static void do_dasd_request(struct request_queue *queue)
 {
-	struct dasd_device *device;
+	struct dasd_block *block;
 
-	device = (struct dasd_device *) queue->queuedata;
-	spin_lock(get_ccwdev_lock(device->cdev));
+	block = queue->queuedata;
+	spin_lock(&block->queue_lock);
 	/* Get new request from the block device request queue */
-	__dasd_process_blk_queue(device);
+	__dasd_process_request_queue(block);
 	/* Now check if the head of the ccw queue needs to be started. */
-	__dasd_start_head(device);
-	spin_unlock(get_ccwdev_lock(device->cdev));
+	__dasd_block_start_head(block);
+	spin_unlock(&block->queue_lock);
 }
 
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */
-static int
-dasd_alloc_queue(struct dasd_device * device)
+static int dasd_alloc_queue(struct dasd_block *block)
 {
 	int rc;
 
-	device->request_queue = blk_init_queue(do_dasd_request,
-					       &device->request_queue_lock);
-	if (device->request_queue == NULL)
+	block->request_queue = blk_init_queue(do_dasd_request,
+					      &block->request_queue_lock);
+	if (block->request_queue == NULL)
 		return -ENOMEM;
 
-	device->request_queue->queuedata = device;
+	block->request_queue->queuedata = block;
 
-	elevator_exit(device->request_queue->elevator);
-	rc = elevator_init(device->request_queue, "deadline");
+	elevator_exit(block->request_queue->elevator);
+	rc = elevator_init(block->request_queue, "deadline");
 	if (rc) {
-		blk_cleanup_queue(device->request_queue);
+		blk_cleanup_queue(block->request_queue);
 		return rc;
 	}
 	return 0;
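The queue bring-up above is the era-appropriate (pre-blk-mq) pattern: allocate a request_fn-driven queue, drop the default elevator, and install "deadline". A hedged standalone sketch under the same 2.6-era API, with hypothetical my_* names:

#include <linux/blkdev.h>

static DEFINE_SPINLOCK(my_lock);

static void my_request_fn(struct request_queue *q)
{
	/* called with my_lock held; drain q via elv_next_request() */
}

static struct request_queue *my_alloc_queue(void)
{
	struct request_queue *q = blk_init_queue(my_request_fn, &my_lock);

	if (!q)
		return NULL;
	elevator_exit(q->elevator);		/* drop the default elevator */
	if (elevator_init(q, "deadline")) {
		blk_cleanup_queue(q);
		return NULL;
	}
	return q;
}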
@@ -1780,79 +1970,76 @@ dasd_alloc_queue(struct dasd_device * device)
 /*
  * Allocate and initialize request queue.
  */
-static void
-dasd_setup_queue(struct dasd_device * device)
+static void dasd_setup_queue(struct dasd_block *block)
 {
 	int max;
 
-	blk_queue_hardsect_size(device->request_queue, device->bp_block);
-	max = device->discipline->max_blocks << device->s2b_shift;
-	blk_queue_max_sectors(device->request_queue, max);
-	blk_queue_max_phys_segments(device->request_queue, -1L);
-	blk_queue_max_hw_segments(device->request_queue, -1L);
-	blk_queue_max_segment_size(device->request_queue, -1L);
-	blk_queue_segment_boundary(device->request_queue, -1L);
-	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
+	blk_queue_hardsect_size(block->request_queue, block->bp_block);
+	max = block->base->discipline->max_blocks << block->s2b_shift;
+	blk_queue_max_sectors(block->request_queue, max);
+	blk_queue_max_phys_segments(block->request_queue, -1L);
+	blk_queue_max_hw_segments(block->request_queue, -1L);
+	blk_queue_max_segment_size(block->request_queue, -1L);
+	blk_queue_segment_boundary(block->request_queue, -1L);
+	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
 }
 
 /*
  * Deactivate and free request queue.
  */
-static void
-dasd_free_queue(struct dasd_device * device)
+static void dasd_free_queue(struct dasd_block *block)
 {
-	if (device->request_queue) {
-		blk_cleanup_queue(device->request_queue);
-		device->request_queue = NULL;
+	if (block->request_queue) {
+		blk_cleanup_queue(block->request_queue);
+		block->request_queue = NULL;
 	}
 }
 
 /*
  * Flush request on the request queue.
  */
-static void
-dasd_flush_request_queue(struct dasd_device * device)
+static void dasd_flush_request_queue(struct dasd_block *block)
 {
 	struct request *req;
 
-	if (!device->request_queue)
+	if (!block->request_queue)
 		return;
 
-	spin_lock_irq(&device->request_queue_lock);
-	while ((req = elv_next_request(device->request_queue))) {
+	spin_lock_irq(&block->request_queue_lock);
+	while ((req = elv_next_request(block->request_queue))) {
 		blkdev_dequeue_request(req);
 		dasd_end_request(req, 0);
 	}
-	spin_unlock_irq(&device->request_queue_lock);
+	spin_unlock_irq(&block->request_queue_lock);
 }
 
-static int
-dasd_open(struct inode *inp, struct file *filp)
+static int dasd_open(struct inode *inp, struct file *filp)
 {
 	struct gendisk *disk = inp->i_bdev->bd_disk;
-	struct dasd_device *device = disk->private_data;
+	struct dasd_block *block = disk->private_data;
+	struct dasd_device *base = block->base;
 	int rc;
 
-	atomic_inc(&device->open_count);
-	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+	atomic_inc(&block->open_count);
+	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
 		rc = -ENODEV;
 		goto unlock;
 	}
 
-	if (!try_module_get(device->discipline->owner)) {
+	if (!try_module_get(base->discipline->owner)) {
 		rc = -EINVAL;
 		goto unlock;
 	}
 
 	if (dasd_probeonly) {
-		DEV_MESSAGE(KERN_INFO, device, "%s",
+		DEV_MESSAGE(KERN_INFO, base, "%s",
 			    "No access to device due to probeonly mode");
 		rc = -EPERM;
 		goto out;
 	}
 
-	if (device->state <= DASD_STATE_BASIC) {
-		DBF_DEV_EVENT(DBF_ERR, device, " %s",
+	if (base->state <= DASD_STATE_BASIC) {
+		DBF_DEV_EVENT(DBF_ERR, base, " %s",
 			      " Cannot open unrecognized device");
 		rc = -ENODEV;
 		goto out;
@@ -1861,41 +2048,41 @@ dasd_open(struct inode *inp, struct file *filp)
 	return 0;
 
 out:
-	module_put(device->discipline->owner);
+	module_put(base->discipline->owner);
 unlock:
-	atomic_dec(&device->open_count);
+	atomic_dec(&block->open_count);
 	return rc;
 }
 
-static int
-dasd_release(struct inode *inp, struct file *filp)
+static int dasd_release(struct inode *inp, struct file *filp)
 {
 	struct gendisk *disk = inp->i_bdev->bd_disk;
-	struct dasd_device *device = disk->private_data;
+	struct dasd_block *block = disk->private_data;
 
-	atomic_dec(&device->open_count);
-	module_put(device->discipline->owner);
+	atomic_dec(&block->open_count);
+	module_put(block->base->discipline->owner);
 	return 0;
 }
 
 /*
  * Return disk geometry.
  */
-static int
-dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
-	struct dasd_device *device;
+	struct dasd_block *block;
+	struct dasd_device *base;
 
-	device = bdev->bd_disk->private_data;
-	if (!device)
+	block = bdev->bd_disk->private_data;
+	base = block->base;
+	if (!block)
 		return -ENODEV;
 
-	if (!device->discipline ||
-	    !device->discipline->fill_geometry)
+	if (!base->discipline ||
+	    !base->discipline->fill_geometry)
 		return -EINVAL;
 
-	device->discipline->fill_geometry(device, geo);
-	geo->start = get_start_sect(bdev) >> device->s2b_shift;
+	base->discipline->fill_geometry(block, geo);
+	geo->start = get_start_sect(bdev) >> block->s2b_shift;
 	return 0;
 }
 
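The final shift converts the partition start from 512-byte sectors into device blocks: s2b_shift is the driver's sectors-per-block shift, i.e. log2(bp_block / 512). A worked example with assumed, illustrative numbers:

unsigned int bp_block = 4096;	/* device block size in bytes */
unsigned int s2b_shift = 3;	/* log2(4096 / 512) sectors per block */
sector_t start = 2048 >> s2b_shift;	/* 2048 sectors -> block 256 */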
@@ -1909,6 +2096,9 @@ dasd_device_operations = {
 	.getgeo = dasd_getgeo,
 };
 
+/*******************************************************************************
+ * end of block device operations
+ */
 
 static void
 dasd_exit(void)
@@ -1937,9 +2127,8 @@ dasd_exit(void)
  * Initial attempt at a probe function. this can be simplified once
  * the other detection code is gone.
  */
-int
-dasd_generic_probe (struct ccw_device *cdev,
-		    struct dasd_discipline *discipline)
+int dasd_generic_probe(struct ccw_device *cdev,
+		       struct dasd_discipline *discipline)
 {
 	int ret;
 
@@ -1979,10 +2168,10 @@ dasd_generic_probe (struct ccw_device *cdev,
  * This will one day be called from a global not_oper handler.
  * It is also used by driver_unregister during module unload.
  */
-void
-dasd_generic_remove (struct ccw_device *cdev)
+void dasd_generic_remove(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
+	struct dasd_block *block;
 
 	cdev->handler = NULL;
 
@@ -2002,7 +2191,15 @@ dasd_generic_remove (struct ccw_device *cdev)
 	 */
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	device->block = NULL;
 	dasd_delete_device(device);
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
 }
 
 /*
@@ -2010,10 +2207,8 @@ dasd_generic_remove (struct ccw_device *cdev)
  * the device is detected for the first time and is supposed to be used
  * or the user has started activation through sysfs.
  */
-int
-dasd_generic_set_online (struct ccw_device *cdev,
-			 struct dasd_discipline *base_discipline)
-
+int dasd_generic_set_online(struct ccw_device *cdev,
+			    struct dasd_discipline *base_discipline)
 {
 	struct dasd_discipline *discipline;
 	struct dasd_device *device;
@@ -2049,6 +2244,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
 	device->base_discipline = base_discipline;
 	device->discipline = discipline;
 
+	/* check_device will allocate block device if necessary */
 	rc = discipline->check_device(device);
 	if (rc) {
 		printk (KERN_WARNING
@@ -2068,6 +2264,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
 			cdev->dev.bus_id);
 		rc = -ENODEV;
 		dasd_set_target_state(device, DASD_STATE_NEW);
+		if (device->block)
+			dasd_free_block(device->block);
 		dasd_delete_device(device);
 	} else
 		pr_debug("dasd_generic device %s found\n",
@@ -2082,10 +2280,10 @@ dasd_generic_set_online (struct ccw_device *cdev,
 	return rc;
 }
 
-int
-dasd_generic_set_offline (struct ccw_device *cdev)
+int dasd_generic_set_offline(struct ccw_device *cdev)
 {
 	struct dasd_device *device;
+	struct dasd_block *block;
 	int max_count, open_count;
 
 	device = dasd_device_from_cdev(cdev);
@@ -2102,30 +2300,39 @@ dasd_generic_set_offline (struct ccw_device *cdev)
 	 * the blkdev_get in dasd_scan_partitions. We are only interested
 	 * in the other openers.
 	 */
-	max_count = device->bdev ? 0 : -1;
-	open_count = (int) atomic_read(&device->open_count);
-	if (open_count > max_count) {
-		if (open_count > 0)
-			printk (KERN_WARNING "Can't offline dasd device with "
-				"open count = %i.\n",
-				open_count);
-		else
-			printk (KERN_WARNING "%s",
-				"Can't offline dasd device due to internal "
-				"use\n");
-		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
-		dasd_put_device(device);
-		return -EBUSY;
+	if (device->block) {
+		struct dasd_block *block = device->block;
+		max_count = block->bdev ? 0 : -1;
+		open_count = (int) atomic_read(&block->open_count);
+		if (open_count > max_count) {
+			if (open_count > 0)
+				printk(KERN_WARNING "Can't offline dasd "
+				       "device with open count = %i.\n",
+				       open_count);
+			else
+				printk(KERN_WARNING "%s",
+				       "Can't offline dasd device due "
+				       "to internal use\n");
+			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
+			dasd_put_device(device);
+			return -EBUSY;
+		}
 	}
 	dasd_set_target_state(device, DASD_STATE_NEW);
 	/* dasd_delete_device destroys the device reference. */
+	block = device->block;
+	device->block = NULL;
 	dasd_delete_device(device);
-
+	/*
+	 * life cycle of block is bound to device, so delete it after
+	 * device was safely removed
+	 */
+	if (block)
+		dasd_free_block(block);
 	return 0;
 }
 
-int
-dasd_generic_notify(struct ccw_device *cdev, int event)
+int dasd_generic_notify(struct ccw_device *cdev, int event)
 {
 	struct dasd_device *device;
 	struct dasd_ccw_req *cqr;
@@ -2146,27 +2353,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
 		if (device->state < DASD_STATE_BASIC)
 			break;
 		/* Device is active. We want to keep it. */
-		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
-			list_for_each_entry(cqr, &device->ccw_queue, list)
-				if (cqr->status == DASD_CQR_IN_IO)
-					cqr->status = DASD_CQR_FAILED;
-			device->stopped |= DASD_STOPPED_DC_EIO;
-		} else {
-			list_for_each_entry(cqr, &device->ccw_queue, list)
-				if (cqr->status == DASD_CQR_IN_IO) {
-					cqr->status = DASD_CQR_QUEUED;
-					cqr->retries++;
-				}
-			device->stopped |= DASD_STOPPED_DC_WAIT;
-			dasd_set_timer(device, 0);
-		}
-		dasd_schedule_bh(device);
+		list_for_each_entry(cqr, &device->ccw_queue, devlist)
+			if (cqr->status == DASD_CQR_IN_IO) {
+				cqr->status = DASD_CQR_QUEUED;
+				cqr->retries++;
+			}
+		device->stopped |= DASD_STOPPED_DC_WAIT;
+		dasd_device_clear_timer(device);
+		dasd_schedule_device_bh(device);
 		ret = 1;
 		break;
 	case CIO_OPER:
 		/* FIXME: add a sanity check. */
-		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
-		dasd_schedule_bh(device);
+		device->stopped &= ~DASD_STOPPED_DC_WAIT;
+		dasd_schedule_device_bh(device);
+		if (device->block)
+			dasd_schedule_block_bh(device->block);
 		ret = 1;
 		break;
 	}
@@ -2196,7 +2398,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
 	ccw->cda = (__u32)(addr_t)rdc_buffer;
 	ccw->count = rdc_buffer_size;
 
-	cqr->device = device;
+	cqr->startdev = device;
+	cqr->memdev = device;
 	cqr->expires = 10*HZ;
 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
 	cqr->retries = 2;
@@ -2218,13 +2421,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
 		return PTR_ERR(cqr);
 
 	ret = dasd_sleep_on(cqr);
-	dasd_sfree_request(cqr, cqr->device);
+	dasd_sfree_request(cqr, cqr->memdev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
 
-static int __init
-dasd_init(void)
+static int __init dasd_init(void)
 {
 	int rc;
 
@@ -2232,7 +2434,7 @@ dasd_init(void)
 	init_waitqueue_head(&dasd_flush_wq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
-	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
+	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long));
 	if (dasd_debug_area == NULL) {
 		rc = -ENOMEM;
 		goto failed;
@@ -2278,15 +2480,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
 EXPORT_SYMBOL(dasd_add_request_head);
 EXPORT_SYMBOL(dasd_add_request_tail);
 EXPORT_SYMBOL(dasd_cancel_req);
-EXPORT_SYMBOL(dasd_clear_timer);
+EXPORT_SYMBOL(dasd_device_clear_timer);
+EXPORT_SYMBOL(dasd_block_clear_timer);
 EXPORT_SYMBOL(dasd_enable_device);
 EXPORT_SYMBOL(dasd_int_handler);
 EXPORT_SYMBOL(dasd_kfree_request);
 EXPORT_SYMBOL(dasd_kick_device);
 EXPORT_SYMBOL(dasd_kmalloc_request);
-EXPORT_SYMBOL(dasd_schedule_bh);
+EXPORT_SYMBOL(dasd_schedule_device_bh);
+EXPORT_SYMBOL(dasd_schedule_block_bh);
 EXPORT_SYMBOL(dasd_set_target_state);
-EXPORT_SYMBOL(dasd_set_timer);
+EXPORT_SYMBOL(dasd_device_set_timer);
+EXPORT_SYMBOL(dasd_block_set_timer);
 EXPORT_SYMBOL(dasd_sfree_request);
 EXPORT_SYMBOL(dasd_sleep_on);
 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
@@ -2300,4 +2505,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
 EXPORT_SYMBOL_GPL(dasd_generic_notify);
 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
-
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
+EXPORT_SYMBOL_GPL(dasd_free_block);