Diffstat (limited to 'drivers/s390/block/dasd.c')

 -rw-r--r--  drivers/s390/block/dasd.c | 1690
 1 file changed, 950 insertions(+), 740 deletions(-)
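This patch splits the monolithic struct dasd_device into a channel-device part (struct dasd_device) and a block-device part (struct dasd_block): gendisk, request queue, partition and profiling state move into dasd_block, a request now points at its startdev and is chained via devlist instead of device/list, and every former queue access is guarded by a device->block NULL check. A minimal userspace sketch of the resulting relationship, using stand-in types with only the fields this diff makes visible (not the real kernel definitions):

#include <stdio.h>

struct dasd_block;

struct dasd_device {			/* channel-device facet */
	int state;			/* DASD_STATE_* ladder */
	struct dasd_block *block;	/* NULL if no disk is presented */
};

struct dasd_block {			/* block-device facet */
	struct dasd_device *base;	/* device that carries the I/O */
	int open_count;
	unsigned int blocks;		/* size in device blocks */
	unsigned int bp_block;		/* bytes per block */
	unsigned int s2b_shift;		/* log2 of 512-byte sectors per block */
};

int main(void)
{
	struct dasd_device dev = { .state = 0, .block = NULL };

	/* paths that used to touch queue state unconditionally now
	 * guard on device->block, cf. dasd_state_new_to_known() below */
	printf("device has a block facet: %s\n", dev.block ? "yes" : "no");
	return 0;
}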
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e6bfce690ca3..1db15f3e5d20 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL");
 /*
  * SECTION: prototypes for static functions of dasd.c
  */
-static int dasd_alloc_queue(struct dasd_device * device);
-static void dasd_setup_queue(struct dasd_device * device);
-static void dasd_free_queue(struct dasd_device * device);
-static void dasd_flush_request_queue(struct dasd_device *);
-static int dasd_flush_ccw_queue(struct dasd_device *, int);
-static void dasd_tasklet(struct dasd_device *);
+static int dasd_alloc_queue(struct dasd_block *);
+static void dasd_setup_queue(struct dasd_block *);
+static void dasd_free_queue(struct dasd_block *);
+static void dasd_flush_request_queue(struct dasd_block *);
+static int dasd_flush_block_queue(struct dasd_block *);
+static void dasd_device_tasklet(struct dasd_device *);
+static void dasd_block_tasklet(struct dasd_block *);
 static void do_kick_device(struct work_struct *);
+static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
 
 /*
  * SECTION: Operations on the device structure.
@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq;
 /*
  * Allocate memory for a new device structure.
  */
-struct dasd_device *
-dasd_alloc_device(void)
+struct dasd_device *dasd_alloc_device(void)
 {
 	struct dasd_device *device;
 
-	device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
-	if (device == NULL)
+	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
+	if (!device)
 		return ERR_PTR(-ENOMEM);
-	/* open_count = 0 means device online but not in use */
-	atomic_set(&device->open_count, -1);
 
 	/* Get two pages for normal block device operations. */
 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
-	if (device->ccw_mem == NULL) {
+	if (!device->ccw_mem) {
 		kfree(device);
 		return ERR_PTR(-ENOMEM);
 	}
 	/* Get one page for error recovery. */
 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
-	if (device->erp_mem == NULL) {
+	if (!device->erp_mem) {
 		free_pages((unsigned long) device->ccw_mem, 1);
 		kfree(device);
 		return ERR_PTR(-ENOMEM);
@@ -93,10 +92,9 @@ dasd_alloc_device(void)
 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
 	spin_lock_init(&device->mem_lock);
-	spin_lock_init(&device->request_queue_lock);
-	atomic_set (&device->tasklet_scheduled, 0);
+	atomic_set(&device->tasklet_scheduled, 0);
 	tasklet_init(&device->tasklet,
-		     (void (*)(unsigned long)) dasd_tasklet,
+		     (void (*)(unsigned long)) dasd_device_tasklet,
 		     (unsigned long) device);
 	INIT_LIST_HEAD(&device->ccw_queue);
 	init_timer(&device->timer);
@@ -110,8 +108,7 @@ dasd_alloc_device(void)
 /*
  * Free memory of a device structure.
  */
-void
-dasd_free_device(struct dasd_device *device)
+void dasd_free_device(struct dasd_device *device)
 {
 	kfree(device->private);
 	free_page((unsigned long) device->erp_mem);
@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device)
 }
 
 /*
+ * Allocate memory for a new device structure.
+ */
+struct dasd_block *dasd_alloc_block(void)
+{
+	struct dasd_block *block;
+
+	block = kzalloc(sizeof(*block), GFP_ATOMIC);
+	if (!block)
+		return ERR_PTR(-ENOMEM);
+	/* open_count = 0 means device online but not in use */
+	atomic_set(&block->open_count, -1);
+
+	spin_lock_init(&block->request_queue_lock);
+	atomic_set(&block->tasklet_scheduled, 0);
+	tasklet_init(&block->tasklet,
+		     (void (*)(unsigned long)) dasd_block_tasklet,
+		     (unsigned long) block);
+	INIT_LIST_HEAD(&block->ccw_queue);
+	spin_lock_init(&block->queue_lock);
+	init_timer(&block->timer);
+
+	return block;
+}
+
+/*
+ * Free memory of a device structure.
+ */
+void dasd_free_block(struct dasd_block *block)
+{
+	kfree(block);
+}
+
+/*
  * Make a new device known to the system.
  */
-static int
-dasd_state_new_to_known(struct dasd_device *device)
+static int dasd_state_new_to_known(struct dasd_device *device)
 {
 	int rc;
 
@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device)
 	 */
 	dasd_get_device(device);
 
-	rc = dasd_alloc_queue(device);
-	if (rc) {
-		dasd_put_device(device);
-		return rc;
+	if (device->block) {
+		rc = dasd_alloc_queue(device->block);
+		if (rc) {
+			dasd_put_device(device);
+			return rc;
+		}
 	}
-
 	device->state = DASD_STATE_KNOWN;
 	return 0;
 }
@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device)
 /*
  * Let the system forget about a device.
  */
-static int
-dasd_state_known_to_new(struct dasd_device * device)
+static int dasd_state_known_to_new(struct dasd_device *device)
 {
 	/* Disable extended error reporting for this device. */
 	dasd_eer_disable(device);
 	/* Forget the discipline information. */
-	if (device->discipline)
+	if (device->discipline) {
+		if (device->discipline->uncheck_device)
+			device->discipline->uncheck_device(device);
 		module_put(device->discipline->owner);
+	}
 	device->discipline = NULL;
 	if (device->base_discipline)
 		module_put(device->base_discipline->owner);
 	device->base_discipline = NULL;
 	device->state = DASD_STATE_NEW;
 
-	dasd_free_queue(device);
+	if (device->block)
+		dasd_free_queue(device->block);
 
 	/* Give up reference we took in dasd_state_new_to_known. */
 	dasd_put_device(device);
@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device)
 /*
  * Request the irq line for the device.
  */
-static int
-dasd_state_known_to_basic(struct dasd_device * device)
+static int dasd_state_known_to_basic(struct dasd_device *device)
 {
 	int rc;
 
 	/* Allocate and register gendisk structure. */
-	rc = dasd_gendisk_alloc(device);
-	if (rc)
-		return rc;
-
+	if (device->block) {
+		rc = dasd_gendisk_alloc(device->block);
+		if (rc)
+			return rc;
+	}
 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
-	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
-					    8 * sizeof (long));
+	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
+					    8 * sizeof(long));
 	debug_register_view(device->debug_area, &debug_sprintf_view);
 	debug_set_level(device->debug_area, DBF_WARNING);
 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device)
 /*
  * Release the irq line for the device. Terminate any running i/o.
 */
-static int
-dasd_state_basic_to_known(struct dasd_device * device)
+static int dasd_state_basic_to_known(struct dasd_device *device)
 {
 	int rc;
-
-	dasd_gendisk_free(device);
-	rc = dasd_flush_ccw_queue(device, 1);
+	if (device->block) {
+		dasd_gendisk_free(device->block);
+		dasd_block_clear_timer(device->block);
+	}
+	rc = dasd_flush_device_queue(device);
 	if (rc)
 		return rc;
-	dasd_clear_timer(device);
+	dasd_device_clear_timer(device);
 
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device)
  * In case the analysis returns an error, the device setup is stopped
  * (a fake disk was already added to allow formatting).
 */
-static int
-dasd_state_basic_to_ready(struct dasd_device * device)
+static int dasd_state_basic_to_ready(struct dasd_device *device)
 {
 	int rc;
+	struct dasd_block *block;
 
 	rc = 0;
-	if (device->discipline->do_analysis != NULL)
-		rc = device->discipline->do_analysis(device);
-	if (rc) {
-		if (rc != -EAGAIN)
-			device->state = DASD_STATE_UNFMT;
-		return rc;
-	}
+	block = device->block;
 	/* make disk known with correct capacity */
-	dasd_setup_queue(device);
-	set_capacity(device->gdp, device->blocks << device->s2b_shift);
-	device->state = DASD_STATE_READY;
-	rc = dasd_scan_partitions(device);
-	if (rc)
-		device->state = DASD_STATE_BASIC;
+	if (block) {
+		if (block->base->discipline->do_analysis != NULL)
+			rc = block->base->discipline->do_analysis(block);
+		if (rc) {
+			if (rc != -EAGAIN)
+				device->state = DASD_STATE_UNFMT;
+			return rc;
+		}
+		dasd_setup_queue(block);
+		set_capacity(block->gdp,
+			     block->blocks << block->s2b_shift);
+		device->state = DASD_STATE_READY;
+		rc = dasd_scan_partitions(block);
+		if (rc)
+			device->state = DASD_STATE_BASIC;
+	} else {
+		device->state = DASD_STATE_READY;
+	}
 	return rc;
 }
 
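In the reworked dasd_state_basic_to_ready() above, the capacity handed to set_capacity() is block->blocks << block->s2b_shift. set_capacity() counts 512-byte sectors, and s2b_shift is the log2 of sectors per device block, so a disk formatted with 4096-byte blocks shifts by 3. A standalone check of that arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long blocks = 600ULL * 1024;	/* hypothetical count of 4KB blocks */
	unsigned int s2b_shift = 3;			/* 4096-byte blocks: 4096 >> 9 == 1 << 3 */

	/* set_capacity() wants 512-byte sectors */
	printf("sectors = %llu\n", blocks << s2b_shift);
	return 0;
}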
@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
  * Forget format information. Check if the target level is basic
  * and if it is create fake disk for formatting.
 */
-static int
-dasd_state_ready_to_basic(struct dasd_device * device)
+static int dasd_state_ready_to_basic(struct dasd_device *device)
 {
 	int rc;
 
-	rc = dasd_flush_ccw_queue(device, 0);
-	if (rc)
-		return rc;
-	dasd_destroy_partitions(device);
-	dasd_flush_request_queue(device);
-	device->blocks = 0;
-	device->bp_block = 0;
-	device->s2b_shift = 0;
 	device->state = DASD_STATE_BASIC;
+	if (device->block) {
+		struct dasd_block *block = device->block;
+		rc = dasd_flush_block_queue(block);
+		if (rc) {
+			device->state = DASD_STATE_READY;
+			return rc;
+		}
+		dasd_destroy_partitions(block);
+		dasd_flush_request_queue(block);
+		block->blocks = 0;
+		block->bp_block = 0;
+		block->s2b_shift = 0;
+	}
 	return 0;
 }
 
 /*
  * Back to basic.
 */
-static int
-dasd_state_unfmt_to_basic(struct dasd_device * device)
+static int dasd_state_unfmt_to_basic(struct dasd_device *device)
 {
 	device->state = DASD_STATE_BASIC;
 	return 0;
@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
 static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
+	int rc;
+
+	if (device->discipline->ready_to_online) {
+		rc = device->discipline->ready_to_online(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_ONLINE;
-	dasd_schedule_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
 	return 0;
 }
 
 /*
  * Stop the requeueing of requests again.
 */
-static int
-dasd_state_online_to_ready(struct dasd_device * device)
+static int dasd_state_online_to_ready(struct dasd_device *device)
 {
+	int rc;
+
+	if (device->discipline->online_to_ready) {
+		rc = device->discipline->online_to_ready(device);
+		if (rc)
+			return rc;
+	}
 	device->state = DASD_STATE_READY;
 	return 0;
 }
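Both transitions above gain optional discipline hooks (ready_to_online and online_to_ready) that may veto the state change. The guard-then-call pattern in isolation; the types and the always_ok hook are stand-ins, not the real discipline ops:

#include <stdio.h>

struct dasd_device;

struct dasd_discipline {
	int (*ready_to_online)(struct dasd_device *);	/* optional */
};

struct dasd_device {
	struct dasd_discipline *discipline;
	int state;
};

static int transition(struct dasd_device *device)
{
	int rc;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;	/* veto: stay in the old state */
	}
	device->state = 1;		/* DASD_STATE_ONLINE stand-in */
	return 0;
}

static int always_ok(struct dasd_device *d) { (void)d; return 0; }

int main(void)
{
	struct dasd_discipline disc = { .ready_to_online = always_ok };
	struct dasd_device dev = { .discipline = &disc, .state = 0 };

	printf("rc=%d state=%d\n", transition(&dev), dev.state);
	return 0;
}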
@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device)
 /*
  * Device startup state changes.
 */
-static int
-dasd_increase_state(struct dasd_device *device)
+static int dasd_increase_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device)
 /*
  * Device shutdown state changes.
 */
-static int
-dasd_decrease_state(struct dasd_device *device)
+static int dasd_decrease_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device)
 /*
  * This is the main startup/shutdown routine.
 */
-static void
-dasd_change_state(struct dasd_device *device)
+static void dasd_change_state(struct dasd_device *device)
 {
 	int rc;
 
@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device)
  * dasd_kick_device will schedule a call do do_kick_device to the kernel
  * event daemon.
 */
-static void
-do_kick_device(struct work_struct *work)
+static void do_kick_device(struct work_struct *work)
 {
 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
 	dasd_change_state(device);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	dasd_put_device(device);
 }
 
-void
-dasd_kick_device(struct dasd_device *device)
+void dasd_kick_device(struct dasd_device *device)
 {
 	dasd_get_device(device);
 	/* queue call to dasd_kick_device to the kernel event daemon. */
@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device)
 /*
  * Set the target state for a device and starts the state change.
 */
-void
-dasd_set_target_state(struct dasd_device *device, int target)
+void dasd_set_target_state(struct dasd_device *device, int target)
 {
 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
 	if (dasd_probeonly && target > DASD_STATE_READY)
@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target)
 /*
  * Enable devices with device numbers in [from..to].
 */
-static inline int
-_wait_for_device(struct dasd_device *device)
+static inline int _wait_for_device(struct dasd_device *device)
 {
 	return (device->state == device->target);
 }
 
-void
-dasd_enable_device(struct dasd_device *device)
+void dasd_enable_device(struct dasd_device *device)
 {
 	dasd_set_target_state(device, DASD_STATE_ONLINE);
 	if (device->state <= DASD_STATE_KNOWN)
@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
 /*
  * Increments counter in global and local profiling structures.
 */
-#define dasd_profile_counter(value, counter, device) \
+#define dasd_profile_counter(value, counter, block) \
 { \
 	int index; \
 	for (index = 0; index < 31 && value >> (2+index); index++); \
 	dasd_global_profile.counter[index]++; \
-	device->profile.counter[index]++; \
+	block->profile.counter[index]++; \
 }
 
 /*
  * Add profiling information for cqr before execution.
 */
-static void
-dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
-		   struct request *req)
+static void dasd_profile_start(struct dasd_block *block,
+			       struct dasd_ccw_req *cqr,
+			       struct request *req)
 {
 	struct list_head *l;
 	unsigned int counter;
@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
 
 	/* count the length of the chanq for statistics */
 	counter = 0;
-	list_for_each(l, &device->ccw_queue)
+	list_for_each(l, &block->ccw_queue)
 		if (++counter >= 31)
 			break;
 	dasd_global_profile.dasd_io_nr_req[counter]++;
-	device->profile.dasd_io_nr_req[counter]++;
+	block->profile.dasd_io_nr_req[counter]++;
 }
 
 /*
  * Add profiling information for cqr after execution.
 */
-static void
-dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
-		 struct request *req)
+static void dasd_profile_end(struct dasd_block *block,
+			     struct dasd_ccw_req *cqr,
+			     struct request *req)
 {
 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
 	long tottimeps, sectors;
@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
 
 	if (!dasd_global_profile.dasd_io_reqs)
 		memset(&dasd_global_profile, 0,
-		       sizeof (struct dasd_profile_info_t));
+		       sizeof(struct dasd_profile_info_t));
 	dasd_global_profile.dasd_io_reqs++;
 	dasd_global_profile.dasd_io_sects += sectors;
 
-	if (!device->profile.dasd_io_reqs)
-		memset(&device->profile, 0,
-		       sizeof (struct dasd_profile_info_t));
-	device->profile.dasd_io_reqs++;
-	device->profile.dasd_io_sects += sectors;
+	if (!block->profile.dasd_io_reqs)
+		memset(&block->profile, 0,
+		       sizeof(struct dasd_profile_info_t));
+	block->profile.dasd_io_reqs++;
+	block->profile.dasd_io_sects += sectors;
 
-	dasd_profile_counter(sectors, dasd_io_secs, device);
-	dasd_profile_counter(tottime, dasd_io_times, device);
-	dasd_profile_counter(tottimeps, dasd_io_timps, device);
-	dasd_profile_counter(strtime, dasd_io_time1, device);
-	dasd_profile_counter(irqtime, dasd_io_time2, device);
-	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
-	dasd_profile_counter(endtime, dasd_io_time3, device);
+	dasd_profile_counter(sectors, dasd_io_secs, block);
+	dasd_profile_counter(tottime, dasd_io_times, block);
+	dasd_profile_counter(tottimeps, dasd_io_timps, block);
+	dasd_profile_counter(strtime, dasd_io_time1, block);
+	dasd_profile_counter(irqtime, dasd_io_time2, block);
+	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
+	dasd_profile_counter(endtime, dasd_io_time3, block);
 }
 #else
-#define dasd_profile_start(device, cqr, req) do {} while (0)
-#define dasd_profile_end(device, cqr, req) do {} while (0)
+#define dasd_profile_start(block, cqr, req) do {} while (0)
+#define dasd_profile_end(block, cqr, req) do {} while (0)
 #endif /* CONFIG_DASD_PROFILE */
 
 /*
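The dasd_profile_counter() macro kept by the hunk above picks a logarithmic histogram bucket: index grows while value >> (2 + index) is non-zero, so bucket 0 collects values below 4, every further bucket doubles the range, and index is capped at 31. The same loop as a standalone function with a few sample values:

#include <stdio.h>

static int profile_bucket(unsigned long value)
{
	int index;

	/* identical loop to the kernel macro */
	for (index = 0; index < 31 && value >> (2 + index); index++)
		;
	return index;
}

int main(void)
{
	unsigned long samples[] = { 3, 8, 100, 5000 };

	for (int i = 0; i < 4; i++)
		printf("%lu -> bucket %d\n", samples[i],
		       profile_bucket(samples[i]));
	return 0;	/* prints buckets 0, 2, 5 and 11 */
}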
@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
  * memory and 2) dasd_smalloc_request uses the static ccw memory
  * that gets allocated for each device.
 */
-struct dasd_ccw_req *
-dasd_kmalloc_request(char *magic, int cplength, int datasize,
-		     struct dasd_device * device)
+struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 
@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize,
 	return cqr;
 }
 
-struct dasd_ccw_req *
-dasd_smalloc_request(char *magic, int cplength, int datasize,
-		     struct dasd_device * device)
+struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
+					  int datasize,
+					  struct dasd_device *device)
 {
 	unsigned long flags;
 	struct dasd_ccw_req *cqr;
@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
  * idal lists that might have been created by dasd_set_cda and the
  * struct dasd_ccw_req itself.
 */
-void
-dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
 #ifdef CONFIG_64BIT
 	struct ccw1 *ccw;
@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
 	dasd_put_device(device);
 }
 
-void
-dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
+void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
 	unsigned long flags;
 
@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
 /*
  * Check discipline magic in cqr.
 */
-static inline int
-dasd_check_cqr(struct dasd_ccw_req *cqr)
+static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 
 	if (cqr == NULL)
 		return -EINVAL;
-	device = cqr->device;
+	device = cqr->startdev;
 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
 		DEV_MESSAGE(KERN_WARNING, device,
 			    " dasd_ccw_req 0x%08x magic doesn't match"
@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr)
  * ccw_device_clear can fail if the i/o subsystem
  * is in a bad mood.
 */
-int
-dasd_term_IO(struct dasd_ccw_req * cqr)
+int dasd_term_IO(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	int retries, rc;
@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 	if (rc)
 		return rc;
 	retries = 0;
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
 		rc = ccw_device_clear(device->cdev, (long) cqr);
 		switch (rc) {
 		case 0: /* termination successful */
 			cqr->retries--;
-			cqr->status = DASD_CQR_CLEAR;
+			cqr->status = DASD_CQR_CLEAR_PENDING;
 			cqr->stopclk = get_clock();
 			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 		}
 		retries++;
 	}
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 	return rc;
 }
 
@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
  * Start the i/o. This start_IO can fail if the channel is really busy.
  * In that case set up a timer to start the request later.
 */
-int
-dasd_start_IO(struct dasd_ccw_req * cqr)
+int dasd_start_IO(struct dasd_ccw_req *cqr)
 {
 	struct dasd_device *device;
 	int rc;
@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
 	rc = dasd_check_cqr(cqr);
 	if (rc)
 		return rc;
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	if (cqr->retries < 0) {
 		DEV_MESSAGE(KERN_DEBUG, device,
 			    "start_IO: request %p (%02x/%i) - no retry left.",
 			    cqr, cqr->status, cqr->retries);
-		cqr->status = DASD_CQR_FAILED;
+		cqr->status = DASD_CQR_ERROR;
 		return -EIO;
 	}
 	cqr->startclk = get_clock();
@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
  * DASD_CQR_QUEUED for 2) and 3).
 */
-static void
-dasd_timeout_device(unsigned long ptr)
+static void dasd_device_timeout(unsigned long ptr)
 {
 	unsigned long flags;
 	struct dasd_device *device;
@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr)
 	/* re-activate request queue */
 	device->stopped &= ~DASD_STOPPED_PENDING;
 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
 }
 
 /*
  * Setup timeout for a device in jiffies.
 */
-void
-dasd_set_timer(struct dasd_device *device, int expires)
+void dasd_device_set_timer(struct dasd_device *device, int expires)
 {
 	if (expires == 0) {
 		if (timer_pending(&device->timer))
@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires)
 		if (mod_timer(&device->timer, jiffies + expires))
 			return;
 	}
-	device->timer.function = dasd_timeout_device;
+	device->timer.function = dasd_device_timeout;
 	device->timer.data = (unsigned long) device;
 	device->timer.expires = jiffies + expires;
 	add_timer(&device->timer);
@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires)
 /*
  * Clear timeout for a device.
 */
-void
-dasd_clear_timer(struct dasd_device *device)
+void dasd_device_clear_timer(struct dasd_device *device)
 {
 	if (timer_pending(&device->timer))
 		del_timer(&device->timer);
 }
 
-static void
-dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
+static void dasd_handle_killed_request(struct ccw_device *cdev,
+				       unsigned long intparm)
 {
 	struct dasd_ccw_req *cqr;
 	struct dasd_device *device;
@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
 		return;
 	}
 
-	device = (struct dasd_device *) cqr->device;
+	device = (struct dasd_device *) cqr->startdev;
 	if (device == NULL ||
 	    device != dasd_device_from_cdev_locked(cdev) ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
 	/* Schedule request to be retried. */
 	cqr->status = DASD_CQR_QUEUED;
 
-	dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+	dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
 	dasd_put_device(device);
 }
 
-static void
-dasd_handle_state_change_pending(struct dasd_device *device)
+void dasd_generic_handle_state_change(struct dasd_device *device)
 {
-	struct dasd_ccw_req *cqr;
-	struct list_head *l, *n;
-
 	/* First of all start sense subsystem status request. */
 	dasd_eer_snss(device);
 
 	device->stopped &= ~DASD_STOPPED_PENDING;
-
-	/* restart all 'running' IO on queue */
-	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
-		if (cqr->status == DASD_CQR_IN_IO) {
-			cqr->status = DASD_CQR_QUEUED;
-		}
-	}
-	dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+	dasd_schedule_device_bh(device);
+	if (device->block)
+		dasd_schedule_block_bh(device->block);
 }
 
 /*
  * Interrupt handler for "normal" ssch-io based dasd devices.
 */
-void
-dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
-		 struct irb *irb)
+void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+		      struct irb *irb)
 {
 	struct dasd_ccw_req *cqr, *next;
 	struct dasd_device *device;
 	unsigned long long now;
 	int expires;
-	dasd_era_t era;
-	char mask;
 
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	       cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
 	       (unsigned int) intparm);
 
-	/* first of all check for state change pending interrupt */
-	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
-	if ((irb->scsw.dstat & mask) == mask) {
+	/* check for unsolicited interrupts */
+	cqr = (struct dasd_ccw_req *) intparm;
+	if (!cqr || ((irb->scsw.cc == 1) &&
+		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
+		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
+		if (cqr && cqr->status == DASD_CQR_IN_IO)
+			cqr->status = DASD_CQR_QUEUED;
 		device = dasd_device_from_cdev_locked(cdev);
 		if (!IS_ERR(device)) {
-			dasd_handle_state_change_pending(device);
+			dasd_device_clear_timer(device);
+			device->discipline->handle_unsolicited_interrupt(device,
+									 irb);
 			dasd_put_device(device);
 		}
 		return;
 	}
 
-	cqr = (struct dasd_ccw_req *) intparm;
-
-	/* check for unsolicited interrupts */
-	if (cqr == NULL) {
-		MESSAGE(KERN_DEBUG,
-			"unsolicited interrupt received: bus_id %s",
-			cdev->dev.bus_id);
-		return;
-	}
-
-	device = (struct dasd_device *) cqr->device;
-	if (device == NULL ||
+	device = (struct dasd_device *) cqr->startdev;
+	if (!device ||
 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
 		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
 			cdev->dev.bus_id);
@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 
 	/* Check for clear pending */
-	if (cqr->status == DASD_CQR_CLEAR &&
+	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
 	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
-		cqr->status = DASD_CQR_QUEUED;
-		dasd_clear_timer(device);
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
-		dasd_schedule_bh(device);
+		dasd_schedule_device_bh(device);
 		return;
 	}
 
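The next hunk drops the old dasd_era_* classification from dasd_int_handler() in favour of a direct status decision: a clean channel end/device end with no concurrent sense becomes DASD_CQR_SUCCESS; an error either gets requeued for a fastpath retry (when no sense-driven ERP is wanted and retries remain) or is marked DASD_CQR_ERROR for the ERP layer. A heavily condensed sketch of that decision, with stand-in types rather than the kernel structs:

#include <stdio.h>

enum cqr_status { CQR_IN_IO, CQR_QUEUED, CQR_SUCCESS, CQR_ERROR };

struct cqr {
	enum cqr_status status;
	int retries;
	int use_erp;		/* stands in for DASD_CQR_FLAGS_USE_ERP */
};

/* ok: clean channel+device end; sense: sense data present (erw.cons) */
static void on_interrupt(struct cqr *cqr, int ok, int sense)
{
	if (ok) {
		cqr->status = CQR_SUCCESS;
	} else if (!(sense && cqr->use_erp) && cqr->retries > 0) {
		/* "default ERP in fastpath": requeue and let start_IO retry */
		cqr->status = CQR_QUEUED;
	} else {
		cqr->status = CQR_ERROR;	/* left for the ERP layer */
	}
}

int main(void)
{
	struct cqr c = { CQR_IN_IO, 2, 0 };

	on_interrupt(&c, 0, 0);		/* error, retries left -> requeued */
	printf("status=%d (1 == requeued)\n", c.status);
	return 0;
}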
@@ -1017,277 +1040,170 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	}
 	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
 		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
-
-	/* Find out the appropriate era_action. */
-	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
-		era = dasd_era_fatal;
-	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
-		 irb->scsw.cstat == 0 &&
-		 !irb->esw.esw0.erw.cons)
-		era = dasd_era_none;
-	else if (irb->esw.esw0.erw.cons)
-		era = device->discipline->examine_error(cqr, irb);
-	else
-		era = dasd_era_recover;
-
-	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
+	next = NULL;
 	expires = 0;
-	if (era == dasd_era_none) {
-		cqr->status = DASD_CQR_DONE;
+	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
+		/* request was completed successfully */
+		cqr->status = DASD_CQR_SUCCESS;
 		cqr->stopclk = now;
 		/* Start first request on queue if possible -> fast_io. */
-		if (cqr->list.next != &device->ccw_queue) {
-			next = list_entry(cqr->list.next,
-					  struct dasd_ccw_req, list);
-			if ((next->status == DASD_CQR_QUEUED) &&
-			    (!device->stopped)) {
-				if (device->discipline->start_IO(next) == 0)
-					expires = next->expires;
-				else
-					DEV_MESSAGE(KERN_DEBUG, device, "%s",
-						    "Interrupt fastpath "
-						    "failed!");
-			}
+		if (cqr->devlist.next != &device->ccw_queue) {
+			next = list_entry(cqr->devlist.next,
+					  struct dasd_ccw_req, devlist);
 		}
 	} else {		/* error */
-		memcpy(&cqr->irb, irb, sizeof (struct irb));
+		memcpy(&cqr->irb, irb, sizeof(struct irb));
 		if (device->features & DASD_FEATURE_ERPLOG) {
-			/* dump sense data */
 			dasd_log_sense(cqr, irb);
 		}
-		switch (era) {
-		case dasd_era_fatal:
-			cqr->status = DASD_CQR_FAILED;
-			cqr->stopclk = now;
-			break;
-		case dasd_era_recover:
+		/* If we have no sense data, or we just don't want complex ERP
+		 * for this request, but if we have retries left, then just
+		 * reset this request and retry it in the fastpath
+		 */
+		if (!(cqr->irb.esw.esw0.erw.cons &&
+		      test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
+		    cqr->retries > 0) {
+			DEV_MESSAGE(KERN_DEBUG, device,
+				    "default ERP in fastpath (%i retries left)",
+				    cqr->retries);
+			cqr->lpm    = LPM_ANYPATH;
+			cqr->status = DASD_CQR_QUEUED;
+			next = cqr;
+		} else
 			cqr->status = DASD_CQR_ERROR;
-			break;
-		default:
-			BUG();
-		}
+	}
+	if (next && (next->status == DASD_CQR_QUEUED) &&
+	    (!device->stopped)) {
+		if (device->discipline->start_IO(next) == 0)
+			expires = next->expires;
+		else
+			DEV_MESSAGE(KERN_DEBUG, device, "%s",
+				    "Interrupt fastpath "
+				    "failed!");
 	}
 	if (expires != 0)
-		dasd_set_timer(device, expires);
+		dasd_device_set_timer(device, expires);
 	else
-		dasd_clear_timer(device);
-	dasd_schedule_bh(device);
+		dasd_device_clear_timer(device);
+	dasd_schedule_device_bh(device);
 }
 
 /*
- * posts the buffer_cache about a finalized request
+ * If we have an error on a dasd_block layer request then we cancel
+ * and return all further requests from the same dasd_block as well.
 */
-static inline void
-dasd_end_request(struct request *req, int uptodate)
+static void __dasd_device_recovery(struct dasd_device *device,
+				   struct dasd_ccw_req *ref_cqr)
 {
-	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
-		BUG();
-	add_disk_randomness(req->rq_disk);
-	end_that_request_last(req, uptodate);
-}
+	struct list_head *l, *n;
+	struct dasd_ccw_req *cqr;
 
-/*
- * Process finished error recovery ccw.
- */
-static inline void
-__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
-{
-	dasd_erp_fn_t erp_fn;
+	/*
+	 * only requeue request that came from the dasd_block layer
+	 */
+	if (!ref_cqr->block)
+		return;
 
-	if (cqr->status == DASD_CQR_DONE)
-		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
-	else
-		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
-	erp_fn = device->discipline->erp_postaction(cqr);
-	erp_fn(cqr);
-}
+	list_for_each_safe(l, n, &device->ccw_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		if (cqr->status == DASD_CQR_QUEUED &&
+		    ref_cqr->block == cqr->block) {
+			cqr->status = DASD_CQR_CLEARED;
+		}
+	}
+};
 
 /*
- * Process ccw request queue.
+ * Remove those ccw requests from the queue that need to be returned
+ * to the upper layer.
 */
-static void
-__dasd_process_ccw_queue(struct dasd_device * device,
-			 struct list_head *final_queue)
+static void __dasd_device_process_ccw_queue(struct dasd_device *device,
+					    struct list_head *final_queue)
 {
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
-	dasd_erp_fn_t erp_fn;
 
-restart:
 	/* Process request with final status. */
 	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+
 		/* Stop list processing at the first non-final request. */
-		if (cqr->status != DASD_CQR_DONE &&
-		    cqr->status != DASD_CQR_FAILED &&
-		    cqr->status != DASD_CQR_ERROR)
+		if (cqr->status == DASD_CQR_QUEUED ||
+		    cqr->status == DASD_CQR_IN_IO ||
+		    cqr->status == DASD_CQR_CLEAR_PENDING)
 			break;
-		/* Process requests with DASD_CQR_ERROR */
 		if (cqr->status == DASD_CQR_ERROR) {
-			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
-				cqr->status = DASD_CQR_FAILED;
-				cqr->stopclk = get_clock();
-			} else {
-				if (cqr->irb.esw.esw0.erw.cons &&
-				    test_bit(DASD_CQR_FLAGS_USE_ERP,
-					     &cqr->flags)) {
-					erp_fn = device->discipline->
-						erp_action(cqr);
-					erp_fn(cqr);
-				} else
-					dasd_default_erp_action(cqr);
-			}
-			goto restart;
-		}
-
-		/* First of all call extended error reporting. */
-		if (dasd_eer_enabled(device) &&
-		    cqr->status == DASD_CQR_FAILED) {
-			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
-
-			/* restart request */
-			cqr->status = DASD_CQR_QUEUED;
-			cqr->retries = 255;
-			device->stopped |= DASD_STOPPED_QUIESCE;
-			goto restart;
+			__dasd_device_recovery(device, cqr);
 		}
-
-		/* Process finished ERP request. */
-		if (cqr->refers) {
-			__dasd_process_erp(device, cqr);
-			goto restart;
-		}
-
 		/* Rechain finished requests to final queue */
-		cqr->endclk = get_clock();
-		list_move_tail(&cqr->list, final_queue);
+		list_move_tail(&cqr->devlist, final_queue);
 	}
 }
 
-static void
-dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
-{
-	struct request *req;
-	struct dasd_device *device;
-	int status;
-
-	req = (struct request *) data;
-	device = cqr->device;
-	dasd_profile_end(device, cqr, req);
-	status = cqr->device->discipline->free_cp(cqr,req);
-	spin_lock_irq(&device->request_queue_lock);
-	dasd_end_request(req, status);
-	spin_unlock_irq(&device->request_queue_lock);
-}
-
-
 /*
- * Fetch requests from the block device queue.
+ * the cqrs from the final queue are returned to the upper layer
+ * by setting a dasd_block state and calling the callback function
 */
-static void
-__dasd_process_blk_queue(struct dasd_device * device)
+static void __dasd_device_process_final_queue(struct dasd_device *device,
+					      struct list_head *final_queue)
 {
-	struct request_queue *queue;
-	struct request *req;
+	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
-	int nr_queued;
-
-	queue = device->request_queue;
-	/* No queue ? Then there is nothing to do. */
-	if (queue == NULL)
-		return;
-
-	/*
-	 * We requeue request from the block device queue to the ccw
-	 * queue only in two states. In state DASD_STATE_READY the
-	 * partition detection is done and we need to requeue requests
-	 * for that. State DASD_STATE_ONLINE is normal block device
-	 * operation.
-	 */
-	if (device->state != DASD_STATE_READY &&
-	    device->state != DASD_STATE_ONLINE)
-		return;
-	nr_queued = 0;
-	/* Now we try to fetch requests from the request queue */
-	list_for_each_entry(cqr, &device->ccw_queue, list)
-		if (cqr->status == DASD_CQR_QUEUED)
-			nr_queued++;
-	while (!blk_queue_plugged(queue) &&
-	       elv_next_request(queue) &&
-	       nr_queued < DASD_CHANQ_MAX_SIZE) {
-		req = elv_next_request(queue);
 
-		if (device->features & DASD_FEATURE_READONLY &&
-		    rq_data_dir(req) == WRITE) {
-			DBF_DEV_EVENT(DBF_ERR, device,
-				      "Rejecting write request %p",
-				      req);
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
-		}
-		if (device->stopped & DASD_STOPPED_DC_EIO) {
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
-		}
-		cqr = device->discipline->build_cp(device, req);
-		if (IS_ERR(cqr)) {
-			if (PTR_ERR(cqr) == -ENOMEM)
-				break;	/* terminate request queue loop */
-			if (PTR_ERR(cqr) == -EAGAIN) {
-				/*
-				 * The current request cannot be build right
-				 * now, we have to try later. If this request
-				 * is the head-of-queue we stop the device
-				 * for 1/2 second.
-				 */
-				if (!list_empty(&device->ccw_queue))
-					break;
-				device->stopped |= DASD_STOPPED_PENDING;
-				dasd_set_timer(device, HZ/2);
-				break;
-			}
-			DBF_DEV_EVENT(DBF_ERR, device,
-				      "CCW creation failed (rc=%ld) "
-				      "on request %p",
-				      PTR_ERR(cqr), req);
-			blkdev_dequeue_request(req);
-			dasd_end_request(req, 0);
-			continue;
+	list_for_each_safe(l, n, final_queue) {
+		cqr = list_entry(l, struct dasd_ccw_req, devlist);
+		list_del_init(&cqr->devlist);
+		if (cqr->block)
+			spin_lock_bh(&cqr->block->queue_lock);
+		switch (cqr->status) {
+		case DASD_CQR_SUCCESS:
+			cqr->status = DASD_CQR_DONE;
+			break;
+		case DASD_CQR_ERROR:
+			cqr->status = DASD_CQR_NEED_ERP;
+			break;
+		case DASD_CQR_CLEARED:
+			cqr->status = DASD_CQR_TERMINATED;
+			break;
+		default:
+			DEV_MESSAGE(KERN_ERR, device,
+				    "wrong cqr status in __dasd_process_final_queue "
+				    "for cqr %p, status %x",
+				    cqr, cqr->status);
+			BUG();
 		}
-		cqr->callback = dasd_end_request_cb;
-		cqr->callback_data = (void *) req;
-		cqr->status = DASD_CQR_QUEUED;
-		blkdev_dequeue_request(req);
-		list_add_tail(&cqr->list, &device->ccw_queue);
-		dasd_profile_start(device, cqr, req);
-		nr_queued++;
+		if (cqr->block)
+			spin_unlock_bh(&cqr->block->queue_lock);
+		if (cqr->callback != NULL)
+			(cqr->callback)(cqr, cqr->callback_data);
 	}
 }
 
+
+
 /*
  * Take a look at the first request on the ccw queue and check
  * if it reached its expire time. If so, terminate the IO.
 */
-static void
-__dasd_check_expire(struct dasd_device * device)
+static void __dasd_device_check_expire(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 
 	if (list_empty(&device->ccw_queue))
 		return;
-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
 		if (device->discipline->term_IO(cqr) != 0) {
 			/* Hmpf, try again in 5 sec */
-			dasd_set_timer(device, 5*HZ);
 			DEV_MESSAGE(KERN_ERR, device,
 				    "internal error - timeout (%is) expired "
 				    "for cqr %p, termination failed, "
 				    "retrying in 5s",
 				    (cqr->expires/HZ), cqr);
+			cqr->expires += 5*HZ;
+			dasd_device_set_timer(device, 5*HZ);
 		} else {
 			DEV_MESSAGE(KERN_ERR, device,
 				    "internal error - timeout (%is) expired "
@@ -1301,77 +1217,53 @@ __dasd_check_expire(struct dasd_device * device)
  * Take a look at the first request on the ccw queue and check
  * if it needs to be started.
 */
-static void
-__dasd_start_head(struct dasd_device * device)
+static void __dasd_device_start_head(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
 	int rc;
 
 	if (list_empty(&device->ccw_queue))
 		return;
-	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
+	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
 	if (cqr->status != DASD_CQR_QUEUED)
 		return;
-	/* Non-temporary stop condition will trigger fail fast */
-	if (device->stopped & ~DASD_STOPPED_PENDING &&
-	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
-	    (!dasd_eer_enabled(device))) {
-		cqr->status = DASD_CQR_FAILED;
-		dasd_schedule_bh(device);
+	/* when device is stopped, return request to previous layer */
+	if (device->stopped) {
+		cqr->status = DASD_CQR_CLEARED;
+		dasd_schedule_device_bh(device);
 		return;
 	}
-	/* Don't try to start requests if device is stopped */
-	if (device->stopped)
-		return;
 
 	rc = device->discipline->start_IO(cqr);
 	if (rc == 0)
-		dasd_set_timer(device, cqr->expires);
+		dasd_device_set_timer(device, cqr->expires);
 	else if (rc == -EACCES) {
-		dasd_schedule_bh(device);
+		dasd_schedule_device_bh(device);
 	} else
 		/* Hmpf, try again in 1/2 sec */
-		dasd_set_timer(device, 50);
-}
-
-static inline int
-_wait_for_clear(struct dasd_ccw_req *cqr)
-{
-	return (cqr->status == DASD_CQR_QUEUED);
+		dasd_device_set_timer(device, 50);
 }
 
 /*
- * Remove all requests from the ccw queue (all = '1') or only block device
- * requests in case all = '0'.
- * Take care of the erp-chain (chained via cqr->refers) and remove either
- * the whole erp-chain or none of the erp-requests.
- * If a request is currently running, term_IO is called and the request
- * is re-queued. Prior to removing the terminated request we need to wait
- * for the clear-interrupt.
- * In case termination is not possible we stop processing and just finishing
- * the already moved requests.
+ * Go through all request on the dasd_device request queue,
+ * terminate them on the cdev if necessary, and return them to the
+ * submitting layer via callback.
+ * Note:
+ * Make sure that all 'submitting layers' still exist when
+ * this function is called!. In other words, when 'device' is a base
+ * device then all block layer requests must have been removed before
+ * via dasd_flush_block_queue.
 */
-static int
-dasd_flush_ccw_queue(struct dasd_device * device, int all)
+int dasd_flush_device_queue(struct dasd_device *device)
 {
-	struct dasd_ccw_req *cqr, *orig, *n;
-	int rc, i;
-
+	struct dasd_ccw_req *cqr, *n;
+	int rc;
 	struct list_head flush_queue;
 
 	INIT_LIST_HEAD(&flush_queue);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	rc = 0;
-restart:
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
-		/* get original request of erp request-chain */
-		for (orig = cqr; orig->refers != NULL; orig = orig->refers);
-
-		/* Flush all request or only block device requests? */
-		if (all == 0 && cqr->callback != dasd_end_request_cb &&
-		    orig->callback != dasd_end_request_cb) {
-			continue;
-		}
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
 		/* Check status and move request to flush_queue */
 		switch (cqr->status) {
 		case DASD_CQR_IN_IO:
@@ -1387,90 +1279,60 @@ restart:
1387 } 1279 }
1388 break; 1280 break;
1389 case DASD_CQR_QUEUED: 1281 case DASD_CQR_QUEUED:
1390 case DASD_CQR_ERROR:
1391 /* set request to FAILED */
1392 cqr->stopclk = get_clock(); 1282 cqr->stopclk = get_clock();
1393 cqr->status = DASD_CQR_FAILED; 1283 cqr->status = DASD_CQR_CLEARED;
1394 break; 1284 break;
1395 default: /* do not touch the others */ 1285 default: /* no need to modify the others */
1396 break; 1286 break;
1397 } 1287 }
1398 /* Rechain request (including erp chain) */ 1288 list_move_tail(&cqr->devlist, &flush_queue);
1399 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
1400 cqr->endclk = get_clock();
1401 list_move_tail(&cqr->list, &flush_queue);
1402 }
1403 if (i > 1)
1404 /* moved more than one request - need to restart */
1405 goto restart;
1406 } 1289 }
1407
1408finished: 1290finished:
1409 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1291 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1410 /* Now call the callback function of flushed requests */ 1292 /*
1411restart_cb: 1293 * After this point all requests must be in state CLEAR_PENDING,
1412 list_for_each_entry_safe(cqr, n, &flush_queue, list) { 1294 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1413 if (cqr->status == DASD_CQR_CLEAR) { 1295 * one of the others.
1414 /* wait for clear interrupt! */ 1296 */
1415 wait_event(dasd_flush_wq, _wait_for_clear(cqr)); 1297 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1416 cqr->status = DASD_CQR_FAILED; 1298 wait_event(dasd_flush_wq,
1417 } 1299 (cqr->status != DASD_CQR_CLEAR_PENDING));
1418 /* Process finished ERP request. */ 1300 /*
1419 if (cqr->refers) { 1301 * Now set each request back to TERMINATED, DONE or NEED_ERP
1420 __dasd_process_erp(device, cqr); 1302 * and call the callback function of flushed requests
1421 /* restart list_for_xx loop since dasd_process_erp 1303 */
1422 * might remove multiple elements */ 1304 __dasd_device_process_final_queue(device, &flush_queue);
1423 goto restart_cb;
1424 }
1425 /* call the callback function */
1426 cqr->endclk = get_clock();
1427 if (cqr->callback != NULL)
1428 (cqr->callback)(cqr, cqr->callback_data);
1429 }
1430 return rc; 1305 return rc;
1431} 1306}
1432 1307
1433/* 1308/*
1434 * Acquire the device lock and process queues for the device. 1309 * Acquire the device lock and process queues for the device.
1435 */ 1310 */
1436static void 1311static void dasd_device_tasklet(struct dasd_device *device)
1437dasd_tasklet(struct dasd_device * device)
1438{ 1312{
1439 struct list_head final_queue; 1313 struct list_head final_queue;
1440 struct list_head *l, *n;
1441 struct dasd_ccw_req *cqr;
1442 1314
1443 atomic_set (&device->tasklet_scheduled, 0); 1315 atomic_set (&device->tasklet_scheduled, 0);
1444 INIT_LIST_HEAD(&final_queue); 1316 INIT_LIST_HEAD(&final_queue);
1445 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1317 spin_lock_irq(get_ccwdev_lock(device->cdev));
1446 /* Check expire time of first request on the ccw queue. */ 1318 /* Check expire time of first request on the ccw queue. */
1447 __dasd_check_expire(device); 1319 __dasd_device_check_expire(device);
1448 /* Finish off requests on ccw queue */ 1320 /* find final requests on ccw queue */
1449 __dasd_process_ccw_queue(device, &final_queue); 1321 __dasd_device_process_ccw_queue(device, &final_queue);
1450 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1322 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1451 /* Now call the callback function of requests with final status */ 1323 /* Now call the callback function of requests with final status */
1452 list_for_each_safe(l, n, &final_queue) { 1324 __dasd_device_process_final_queue(device, &final_queue);
1453 cqr = list_entry(l, struct dasd_ccw_req, list); 1325 spin_lock_irq(get_ccwdev_lock(device->cdev));
1454 list_del_init(&cqr->list);
1455 if (cqr->callback != NULL)
1456 (cqr->callback)(cqr, cqr->callback_data);
1457 }
1458 spin_lock_irq(&device->request_queue_lock);
1459 spin_lock(get_ccwdev_lock(device->cdev));
1460 /* Get new request from the block device request queue */
1461 __dasd_process_blk_queue(device);
1462 /* Now check if the head of the ccw queue needs to be started. */ 1326 /* Now check if the head of the ccw queue needs to be started. */
1463 __dasd_start_head(device); 1327 __dasd_device_start_head(device);
1464 spin_unlock(get_ccwdev_lock(device->cdev)); 1328 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1465 spin_unlock_irq(&device->request_queue_lock);
1466 dasd_put_device(device); 1329 dasd_put_device(device);
1467} 1330}
1468 1331
1469/* 1332/*
1470 * Schedules a call to dasd_tasklet over the device tasklet. 1333 * Schedules a call to dasd_device_tasklet over the device tasklet.
1471 */ 1334 */
1472void 1335void dasd_schedule_device_bh(struct dasd_device *device)
1473dasd_schedule_bh(struct dasd_device * device)
1474{ 1336{
1475 /* Protect against rescheduling. */ 1337 /* Protect against rescheduling. */
1476 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1338 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
@@ -1480,160 +1342,109 @@ dasd_schedule_bh(struct dasd_device * device)
1480} 1342}
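
The rest of dasd_schedule_device_bh is cut off by the hunk boundary; judging from dasd_schedule_block_bh later in this patch, the guard presumably continues along these lines (a sketch, not the patch text):

static void example_schedule_once(struct dasd_device *device)
{
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;			/* bh already pending */
	dasd_get_device(device);	/* the tasklet owns a reference */
	tasklet_hi_schedule(&device->tasklet);
}
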
1481 1343
1482/* 1344/*
1483 * Queue a request to the head of the ccw_queue. Start the I/O if 1345 * Queue a request to the head of the device ccw_queue.
1484 * possible. 1346 * Start the I/O if possible.
1485 */ 1347 */
1486void 1348void dasd_add_request_head(struct dasd_ccw_req *cqr)
1487dasd_add_request_head(struct dasd_ccw_req *req)
1488{ 1349{
1489 struct dasd_device *device; 1350 struct dasd_device *device;
1490 unsigned long flags; 1351 unsigned long flags;
1491 1352
1492 device = req->device; 1353 device = cqr->startdev;
1493 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1354 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1494 req->status = DASD_CQR_QUEUED; 1355 cqr->status = DASD_CQR_QUEUED;
1495 req->device = device; 1356 list_add(&cqr->devlist, &device->ccw_queue);
1496 list_add(&req->list, &device->ccw_queue);
1497 /* let the bh start the request to keep them in order */ 1357 /* let the bh start the request to keep them in order */
1498 dasd_schedule_bh(device); 1358 dasd_schedule_device_bh(device);
1499 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1359 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1500} 1360}
1501 1361
1502/* 1362/*
1503 * Queue a request to the tail of the ccw_queue. Start the I/O if 1363 * Queue a request to the tail of the device ccw_queue.
1504 * possible. 1364 * Start the I/O if possible.
1505 */ 1365 */
1506void 1366void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1507dasd_add_request_tail(struct dasd_ccw_req *req)
1508{ 1367{
1509 struct dasd_device *device; 1368 struct dasd_device *device;
1510 unsigned long flags; 1369 unsigned long flags;
1511 1370
1512 device = req->device; 1371 device = cqr->startdev;
1513 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1372 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1514 req->status = DASD_CQR_QUEUED; 1373 cqr->status = DASD_CQR_QUEUED;
1515 req->device = device; 1374 list_add_tail(&cqr->devlist, &device->ccw_queue);
1516 list_add_tail(&req->list, &device->ccw_queue);
1517 /* let the bh start the request to keep them in order */ 1375 /* let the bh start the request to keep them in order */
1518 dasd_schedule_bh(device); 1376 dasd_schedule_device_bh(device);
1519 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1377 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1520} 1378}
1521 1379
1522/* 1380/*
1523 * Wakeup callback. 1381 * Wakeup helper for the 'sleep_on' functions.
1524 */ 1382 */
1525static void 1383static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1526dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1527{ 1384{
1528 wake_up((wait_queue_head_t *) data); 1385 wake_up((wait_queue_head_t *) data);
1529} 1386}
1530 1387
1531static inline int 1388static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1532_wait_for_wakeup(struct dasd_ccw_req *cqr)
1533{ 1389{
1534 struct dasd_device *device; 1390 struct dasd_device *device;
1535 int rc; 1391 int rc;
1536 1392
1537 device = cqr->device; 1393 device = cqr->startdev;
1538 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1394 spin_lock_irq(get_ccwdev_lock(device->cdev));
1539 rc = ((cqr->status == DASD_CQR_DONE || 1395 rc = ((cqr->status == DASD_CQR_DONE ||
1540 cqr->status == DASD_CQR_FAILED) && 1396 cqr->status == DASD_CQR_NEED_ERP ||
1541 list_empty(&cqr->list)); 1397 cqr->status == DASD_CQR_TERMINATED) &&
1398 list_empty(&cqr->devlist));
1542 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1399 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1543 return rc; 1400 return rc;
1544} 1401}
1545 1402
1546/* 1403/*
1547 * Attempts to start a special ccw queue and waits for its completion. 1404 * Queue a request to the tail of the device ccw_queue and wait for
1405 * its completion.
1548 */ 1406 */
1549int 1407int dasd_sleep_on(struct dasd_ccw_req *cqr)
1550dasd_sleep_on(struct dasd_ccw_req * cqr)
1551{ 1408{
1552 wait_queue_head_t wait_q; 1409 wait_queue_head_t wait_q;
1553 struct dasd_device *device; 1410 struct dasd_device *device;
1554 int rc; 1411 int rc;
1555 1412
1556 device = cqr->device; 1413 device = cqr->startdev;
1557 spin_lock_irq(get_ccwdev_lock(device->cdev));
1558 1414
1559 init_waitqueue_head (&wait_q); 1415 init_waitqueue_head (&wait_q);
1560 cqr->callback = dasd_wakeup_cb; 1416 cqr->callback = dasd_wakeup_cb;
1561 cqr->callback_data = (void *) &wait_q; 1417 cqr->callback_data = (void *) &wait_q;
1562 cqr->status = DASD_CQR_QUEUED; 1418 dasd_add_request_tail(cqr);
1563 list_add_tail(&cqr->list, &device->ccw_queue);
1564
1565 /* let the bh start the request to keep them in order */
1566 dasd_schedule_bh(device);
1567
1568 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1569
1570 wait_event(wait_q, _wait_for_wakeup(cqr)); 1419 wait_event(wait_q, _wait_for_wakeup(cqr));
1571 1420
1572 /* Request status is either done or failed. */ 1421 /* Request status is either done or failed. */
1573 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1422 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1574 return rc; 1423 return rc;
1575} 1424}
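
A typical caller of dasd_sleep_on(), modeled on dasd_generic_read_dev_chars() near the end of this patch; the request builder below is hypothetical:

static int example_sync_request(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = example_build_cqr(device);	/* hypothetical builder */
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;		/* device that starts the I/O */
	cqr->memdev = device;		/* device whose pools free it */
	rc = dasd_sleep_on(cqr);	/* 0 on DASD_CQR_DONE, else -EIO */
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
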
1576 1425
1577/* 1426/*
1578 * Attempts to start a special ccw queue and wait interruptible 1427 * Queue a request to the tail of the device ccw_queue and wait
1579 * for its completion. 1428 * interruptibly for its completion.
1580 */ 1429 */
1581int 1430int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1582dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1583{ 1431{
1584 wait_queue_head_t wait_q; 1432 wait_queue_head_t wait_q;
1585 struct dasd_device *device; 1433 struct dasd_device *device;
1586 int rc, finished; 1434 int rc;
1587
1588 device = cqr->device;
1589 spin_lock_irq(get_ccwdev_lock(device->cdev));
1590 1435
1436 device = cqr->startdev;
1591 init_waitqueue_head (&wait_q); 1437 init_waitqueue_head (&wait_q);
1592 cqr->callback = dasd_wakeup_cb; 1438 cqr->callback = dasd_wakeup_cb;
1593 cqr->callback_data = (void *) &wait_q; 1439 cqr->callback_data = (void *) &wait_q;
1594 cqr->status = DASD_CQR_QUEUED; 1440 dasd_add_request_tail(cqr);
1595 list_add_tail(&cqr->list, &device->ccw_queue); 1441 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1596 1442 if (rc == -ERESTARTSYS) {
1597 /* let the bh start the request to keep them in order */ 1443 dasd_cancel_req(cqr);
1598 dasd_schedule_bh(device); 1444 /* wait (non-interruptible) for final status */
1599 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1445 wait_event(wait_q, _wait_for_wakeup(cqr));
1600
1601 finished = 0;
1602 while (!finished) {
1603 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1604 if (rc != -ERESTARTSYS) {
1605 /* Request is final (done or failed) */
1606 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1607 break;
1608 }
1609 spin_lock_irq(get_ccwdev_lock(device->cdev));
1610 switch (cqr->status) {
1611 case DASD_CQR_IN_IO:
1612 /* terminate runnig cqr */
1613 if (device->discipline->term_IO) {
1614 cqr->retries = -1;
1615 device->discipline->term_IO(cqr);
1616 /* wait (non-interruptible) for final status
1617 * because signal ist still pending */
1618 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1619 wait_event(wait_q, _wait_for_wakeup(cqr));
1620 spin_lock_irq(get_ccwdev_lock(device->cdev));
1621 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1622 finished = 1;
1623 }
1624 break;
1625 case DASD_CQR_QUEUED:
1626 /* request */
1627 list_del_init(&cqr->list);
1628 rc = -EIO;
1629 finished = 1;
1630 break;
1631 default:
1632 /* cqr with 'non-interruptable' status - just wait */
1633 break;
1634 }
1635 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1636 } 1446 }
1447 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1637 return rc; 1448 return rc;
1638} 1449}
1639 1450
@@ -1643,25 +1454,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1643 * and be put back to status queued, before the special request is added 1454 * and be put back to status queued, before the special request is added
1644 * to the head of the queue. Then the special request is waited on normally. 1455 * to the head of the queue. Then the special request is waited on normally.
1645 */ 1456 */
1646static inline int 1457static inline int _dasd_term_running_cqr(struct dasd_device *device)
1647_dasd_term_running_cqr(struct dasd_device *device)
1648{ 1458{
1649 struct dasd_ccw_req *cqr; 1459 struct dasd_ccw_req *cqr;
1650 1460
1651 if (list_empty(&device->ccw_queue)) 1461 if (list_empty(&device->ccw_queue))
1652 return 0; 1462 return 0;
1653 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1463 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1654 return device->discipline->term_IO(cqr); 1464 return device->discipline->term_IO(cqr);
1655} 1465}
1656 1466
1657int 1467int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1658dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1659{ 1468{
1660 wait_queue_head_t wait_q; 1469 wait_queue_head_t wait_q;
1661 struct dasd_device *device; 1470 struct dasd_device *device;
1662 int rc; 1471 int rc;
1663 1472
1664 device = cqr->device; 1473 device = cqr->startdev;
1665 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1474 spin_lock_irq(get_ccwdev_lock(device->cdev));
1666 rc = _dasd_term_running_cqr(device); 1475 rc = _dasd_term_running_cqr(device);
1667 if (rc) { 1476 if (rc) {
@@ -1673,17 +1482,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1673 cqr->callback = dasd_wakeup_cb; 1482 cqr->callback = dasd_wakeup_cb;
1674 cqr->callback_data = (void *) &wait_q; 1483 cqr->callback_data = (void *) &wait_q;
1675 cqr->status = DASD_CQR_QUEUED; 1484 cqr->status = DASD_CQR_QUEUED;
1676 list_add(&cqr->list, &device->ccw_queue); 1485 list_add(&cqr->devlist, &device->ccw_queue);
1677 1486
1678 /* let the bh start the request to keep them in order */ 1487 /* let the bh start the request to keep them in order */
1679 dasd_schedule_bh(device); 1488 dasd_schedule_device_bh(device);
1680 1489
1681 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1490 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1682 1491
1683 wait_event(wait_q, _wait_for_wakeup(cqr)); 1492 wait_event(wait_q, _wait_for_wakeup(cqr));
1684 1493
1685 /* Request status is either done or failed. */ 1494 /* Request status is either done or failed. */
1686 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1495 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1687 return rc; 1496 return rc;
1688} 1497}
1689 1498
@@ -1692,11 +1501,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1692 * This is useful to timeout requests. The request will be 1501 * This is useful to timeout requests. The request will be
1693 * terminated if it is currently in i/o. 1502 * terminated if it is currently in i/o.
1694 * Returns 1 if the request has been terminated. 1503 * Returns 1 if the request has been terminated.
1504 * 0 if there was no need to terminate the request (not started yet)
1505 * negative error code if termination failed
1506 * Cancellation of a request is an asynchronous operation! The calling
1507 * function has to wait until the request is properly returned via callback.
1695 */ 1508 */
1696int 1509int dasd_cancel_req(struct dasd_ccw_req *cqr)
1697dasd_cancel_req(struct dasd_ccw_req *cqr)
1698{ 1510{
1699 struct dasd_device *device = cqr->device; 1511 struct dasd_device *device = cqr->startdev;
1700 unsigned long flags; 1512 unsigned long flags;
1701 int rc; 1513 int rc;
1702 1514
@@ -1704,74 +1516,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
1704 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1516 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1705 switch (cqr->status) { 1517 switch (cqr->status) {
1706 case DASD_CQR_QUEUED: 1518 case DASD_CQR_QUEUED:
1707 /* request was not started - just set to failed */ 1519 /* request was not started - just set to cleared */
1708 cqr->status = DASD_CQR_FAILED; 1520 cqr->status = DASD_CQR_CLEARED;
1709 break; 1521 break;
1710 case DASD_CQR_IN_IO: 1522 case DASD_CQR_IN_IO:
1711 /* request in IO - terminate IO and release again */ 1523 /* request in IO - terminate IO and release again */
1712 if (device->discipline->term_IO(cqr) != 0) 1524 rc = device->discipline->term_IO(cqr);
1713 /* what to do if unable to terminate ?????? 1525 if (rc) {
1714 e.g. not _IN_IO */ 1526 DEV_MESSAGE(KERN_ERR, device,
1715 cqr->status = DASD_CQR_FAILED; 1527 "to terminate request %p, rc = %d",
1716 cqr->stopclk = get_clock(); 1528 " to terminate request %p, rc = %d",
1717 rc = 1; 1529 cqr, rc);
1530 } else {
1531 cqr->stopclk = get_clock();
1532 rc = 1;
1533 }
1718 break; 1534 break;
1719 case DASD_CQR_DONE: 1535 default: /* already finished or clear pending - do nothing */
1720 case DASD_CQR_FAILED:
1721 /* already finished - do nothing */
1722 break; 1536 break;
1723 default: 1537 }
1724 DEV_MESSAGE(KERN_ALERT, device, 1538 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1725 "invalid status %02x in request", 1539 dasd_schedule_device_bh(device);
1726 cqr->status); 1540 return rc;
1541}
1542
1543
1544/*
1545 * SECTION: Operations of the dasd_block layer.
1546 */
1547
1548/*
1549 * Timeout function for dasd_block. This is used when the block layer
1550 * is waiting for something that may not come reliably, (e.g. a state
1551 * change interrupt)
1552 */
1553static void dasd_block_timeout(unsigned long ptr)
1554{
1555 unsigned long flags;
1556 struct dasd_block *block;
1557
1558 block = (struct dasd_block *) ptr;
1559 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1560 /* re-activate request queue */
1561 block->base->stopped &= ~DASD_STOPPED_PENDING;
1562 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1563 dasd_schedule_block_bh(block);
1564}
1565
1566/*
1567 * Setup timeout for a dasd_block in jiffies.
1568 */
1569void dasd_block_set_timer(struct dasd_block *block, int expires)
1570{
1571 if (expires == 0) {
1572 if (timer_pending(&block->timer))
1573 del_timer(&block->timer);
1574 return;
1575 }
1576 if (timer_pending(&block->timer)) {
1577 if (mod_timer(&block->timer, jiffies + expires))
1578 return;
1579 }
1580 block->timer.function = dasd_block_timeout;
1581 block->timer.data = (unsigned long) block;
1582 block->timer.expires = jiffies + expires;
1583 add_timer(&block->timer);
1584}
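
Usage sketch for dasd_block_set_timer(), mirroring the -EAGAIN path in __dasd_process_request_queue() below: stop the base device and let dasd_block_timeout() lift the stop bit after half a second. The helper name is illustrative only:

static void example_defer_block(struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	block->base->stopped |= DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_block_set_timer(block, HZ / 2);	/* timeout clears the bit */
}
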
1585
1586/*
1587 * Clear timeout for a dasd_block.
1588 */
1589void dasd_block_clear_timer(struct dasd_block *block)
1590{
1591 if (timer_pending(&block->timer))
1592 del_timer(&block->timer);
1593}
1594
1595/*
1596 * notifies the block layer about a finalized request
1597 */
1598static inline void dasd_end_request(struct request *req, int uptodate)
1599{
1600 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1727 BUG(); 1601 BUG();
1602 add_disk_randomness(req->rq_disk);
1603 end_that_request_last(req, uptodate);
1604}
1605
1606/*
1607 * Process finished error recovery ccw.
1608 */
1609static inline void __dasd_block_process_erp(struct dasd_block *block,
1610 struct dasd_ccw_req *cqr)
1611{
1612 dasd_erp_fn_t erp_fn;
1613 struct dasd_device *device = block->base;
1728 1614
1615 if (cqr->status == DASD_CQR_DONE)
1616 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1617 else
1618 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1619 erp_fn = device->discipline->erp_postaction(cqr);
1620 erp_fn(cqr);
1621}
1622
1623/*
1624 * Fetch requests from the block device queue.
1625 */
1626static void __dasd_process_request_queue(struct dasd_block *block)
1627{
1628 struct request_queue *queue;
1629 struct request *req;
1630 struct dasd_ccw_req *cqr;
1631 struct dasd_device *basedev;
1632 unsigned long flags;
1633 queue = block->request_queue;
1634 basedev = block->base;
1635 /* No queue? Then there is nothing to do. */
1636 if (queue == NULL)
1637 return;
1638
1639 /*
1640 * We requeue requests from the block device queue to the ccw
1641 * queue only in two states. In state DASD_STATE_READY the
1642 * partition detection is done and we need to requeue requests
1643 * for that. State DASD_STATE_ONLINE is normal block device
1644 * operation.
1645 */
1646 if (basedev->state < DASD_STATE_READY)
1647 return;
1648 /* Now we try to fetch requests from the request queue */
1649 while (!blk_queue_plugged(queue) &&
1650 elv_next_request(queue)) {
1651
1652 req = elv_next_request(queue);
1653
1654 if (basedev->features & DASD_FEATURE_READONLY &&
1655 rq_data_dir(req) == WRITE) {
1656 DBF_DEV_EVENT(DBF_ERR, basedev,
1657 "Rejecting write request %p",
1658 req);
1659 blkdev_dequeue_request(req);
1660 dasd_end_request(req, 0);
1661 continue;
1662 }
1663 cqr = basedev->discipline->build_cp(basedev, block, req);
1664 if (IS_ERR(cqr)) {
1665 if (PTR_ERR(cqr) == -EBUSY)
1666 break; /* normal end condition */
1667 if (PTR_ERR(cqr) == -ENOMEM)
1668 break; /* terminate request queue loop */
1669 if (PTR_ERR(cqr) == -EAGAIN) {
1670 /*
1671 * The current request cannot be built right
1672 * now; we have to try again later. If this request
1673 * is the head-of-queue we stop the device
1674 * for 1/2 second.
1675 */
1676 if (!list_empty(&block->ccw_queue))
1677 break;
1678 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1679 basedev->stopped |= DASD_STOPPED_PENDING;
1680 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1681 dasd_block_set_timer(block, HZ/2);
1682 break;
1683 }
1684 DBF_DEV_EVENT(DBF_ERR, basedev,
1685 "CCW creation failed (rc=%ld) "
1686 "on request %p",
1687 PTR_ERR(cqr), req);
1688 blkdev_dequeue_request(req);
1689 dasd_end_request(req, 0);
1690 continue;
1691 }
1692 /*
1693 * Note: callback is set to dasd_return_cqr_cb in
1694 * __dasd_block_start_head to cover erp requests as well
1695 */
1696 cqr->callback_data = (void *) req;
1697 cqr->status = DASD_CQR_FILLED;
1698 blkdev_dequeue_request(req);
1699 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1700 dasd_profile_start(block, cqr, req);
1701 }
1702}
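
The loop above fixes an error-code contract for the discipline's build_cp() callback: -EBUSY and -ENOMEM end the fetch loop, -EAGAIN parks the queue for half a second, and anything else fails the request. A hypothetical skeleton honouring that contract (the magic string and body are made up for illustration):

static struct dasd_ccw_req *example_build_cp(struct dasd_device *basedev,
					     struct dasd_block *block,
					     struct request *req)
{
	struct dasd_ccw_req *cqr;

	if (basedev->stopped)
		return ERR_PTR(-EBUSY);		/* ends the fetch loop */
	cqr = dasd_kmalloc_request("XMPL", 1 /* cplength */,
				   0 /* datasize */, basedev);
	if (IS_ERR(cqr))
		return ERR_PTR(-ENOMEM);	/* also ends the loop */
	/* ... translate the struct request into a channel program ... */
	return cqr;
}
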
1703
1704static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1705{
1706 struct request *req;
1707 int status;
1708
1709 req = (struct request *) cqr->callback_data;
1710 dasd_profile_end(cqr->block, cqr, req);
1711 status = cqr->memdev->discipline->free_cp(cqr, req);
1712 dasd_end_request(req, status);
1713}
1714
1715/*
1716 * Process ccw request queue.
1717 */
1718static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1719 struct list_head *final_queue)
1720{
1721 struct list_head *l, *n;
1722 struct dasd_ccw_req *cqr;
1723 dasd_erp_fn_t erp_fn;
1724 unsigned long flags;
1725 struct dasd_device *base = block->base;
1726
1727restart:
1728 /* Process request with final status. */
1729 list_for_each_safe(l, n, &block->ccw_queue) {
1730 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1731 if (cqr->status != DASD_CQR_DONE &&
1732 cqr->status != DASD_CQR_FAILED &&
1733 cqr->status != DASD_CQR_NEED_ERP &&
1734 cqr->status != DASD_CQR_TERMINATED)
1735 continue;
1736
1737 if (cqr->status == DASD_CQR_TERMINATED) {
1738 base->discipline->handle_terminated_request(cqr);
1739 goto restart;
1740 }
1741
1742 /* Process requests that may be recovered */
1743 if (cqr->status == DASD_CQR_NEED_ERP) {
1744 if (cqr->irb.esw.esw0.erw.cons &&
1745 test_bit(DASD_CQR_FLAGS_USE_ERP,
1746 &cqr->flags)) {
1747 erp_fn = base->discipline->erp_action(cqr);
1748 erp_fn(cqr);
1749 }
1750 goto restart;
1751 }
1752
1753 /* First of all call extended error reporting. */
1754 if (dasd_eer_enabled(base) &&
1755 cqr->status == DASD_CQR_FAILED) {
1756 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1757
1758 /* restart request */
1759 cqr->status = DASD_CQR_FILLED;
1760 cqr->retries = 255;
1761 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1762 base->stopped |= DASD_STOPPED_QUIESCE;
1763 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1764 flags);
1765 goto restart;
1766 }
1767
1768 /* Process finished ERP request. */
1769 if (cqr->refers) {
1770 __dasd_block_process_erp(block, cqr);
1771 goto restart;
1772 }
1773
1774 /* Rechain finished requests to final queue */
1775 cqr->endclk = get_clock();
1776 list_move_tail(&cqr->blocklist, final_queue);
1777 }
1778}
1779
1780static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1781{
1782 dasd_schedule_block_bh(cqr->block);
1783}
1784
1785static void __dasd_block_start_head(struct dasd_block *block)
1786{
1787 struct dasd_ccw_req *cqr;
1788
1789 if (list_empty(&block->ccw_queue))
1790 return;
1791 /* We always begin with the first requests on the queue, as some
1792 * of the previously started requests have to be enqueued on a
1793 * dasd_device again for error recovery.
1794 */
1795 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1796 if (cqr->status != DASD_CQR_FILLED)
1797 continue;
1798 /* Non-temporary stop condition will trigger fail fast */
1799 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1800 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1801 (!dasd_eer_enabled(block->base))) {
1802 cqr->status = DASD_CQR_FAILED;
1803 dasd_schedule_block_bh(block);
1804 continue;
1805 }
1806 /* Don't try to start requests if device is stopped */
1807 if (block->base->stopped)
1808 return;
1809
1810 /* just a fail safe check, should not happen */
1811 if (!cqr->startdev)
1812 cqr->startdev = block->base;
1813
1814 /* make sure that the requests we submit find their way back */
1815 cqr->callback = dasd_return_cqr_cb;
1816
1817 dasd_add_request_tail(cqr);
1818 }
1819}
1820
1821/*
1822 * Central dasd_block layer routine. Takes requests from the generic
1823 * block layer request queue, creates ccw requests, enqueues them on
1824 * a dasd_device and processes ccw requests that have been returned.
1825 */
1826static void dasd_block_tasklet(struct dasd_block *block)
1827{
1828 struct list_head final_queue;
1829 struct list_head *l, *n;
1830 struct dasd_ccw_req *cqr;
1831
1832 atomic_set(&block->tasklet_scheduled, 0);
1833 INIT_LIST_HEAD(&final_queue);
1834 spin_lock(&block->queue_lock);
1835 /* Finish off requests on ccw queue */
1836 __dasd_process_block_ccw_queue(block, &final_queue);
1837 spin_unlock(&block->queue_lock);
1838 /* Now call the callback function of requests with final status */
1839 spin_lock_irq(&block->request_queue_lock);
1840 list_for_each_safe(l, n, &final_queue) {
1841 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1842 list_del_init(&cqr->blocklist);
1843 __dasd_cleanup_cqr(cqr);
1844 }
1845 spin_lock(&block->queue_lock);
1846 /* Get new request from the block device request queue */
1847 __dasd_process_request_queue(block);
1848 /* Now check if the head of the ccw queue needs to be started. */
1849 __dasd_block_start_head(block);
1850 spin_unlock(&block->queue_lock);
1851 spin_unlock_irq(&block->request_queue_lock);
1852 dasd_put_device(block->base);
1853}
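
Note the lock nesting dasd_block_tasklet establishes: request_queue_lock (irq-disabling, outer) is taken before queue_lock (inner), and any other path that needs both must keep the same order. Schematically, with an illustrative helper name:

static void example_lock_nesting(struct dasd_block *block)
{
	spin_lock_irq(&block->request_queue_lock);	/* outer, irq-safe */
	spin_lock(&block->queue_lock);			/* inner */
	/* ... work on the ccw queue and the request queue ... */
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
}
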
1854
1855static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1856{
1857 wake_up(&dasd_flush_wq);
1858}
1859
1860/*
1861 * Go through all requests on the dasd_block request queue, cancel them
1862 * on the respective dasd_device, and return them to the generic
1863 * block layer.
1864 */
1865static int dasd_flush_block_queue(struct dasd_block *block)
1866{
1867 struct dasd_ccw_req *cqr, *n;
1868 int rc, i;
1869 struct list_head flush_queue;
1870
1871 INIT_LIST_HEAD(&flush_queue);
1872 spin_lock_bh(&block->queue_lock);
1873 rc = 0;
1874restart:
1875 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1876 /* if this request is currently owned by a dasd_device, cancel it */
1877 if (cqr->status >= DASD_CQR_QUEUED)
1878 rc = dasd_cancel_req(cqr);
1879 if (rc < 0)
1880 break;
1881 /* Rechain request (including erp chain) so it won't be
1882 * touched by the dasd_block_tasklet anymore.
1883 * Replace the callback so we notice when the request
1884 * is returned from the dasd_device layer.
1885 */
1886 cqr->callback = _dasd_wake_block_flush_cb;
1887 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1888 list_move_tail(&cqr->blocklist, &flush_queue);
1889 if (i > 1)
1890 /* moved more than one request - need to restart */
1891 goto restart;
1892 }
1893 spin_unlock_bh(&block->queue_lock);
1894 /* Now call the callback function of flushed requests */
1895restart_cb:
1896 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1897 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1898 /* Process finished ERP request. */
1899 if (cqr->refers) {
1900 __dasd_block_process_erp(block, cqr);
1901 /* restart list_for_xx loop since dasd_process_erp
1902 * might remove multiple elements */
1903 goto restart_cb;
1904 }
1905 /* call the callback function */
1906 cqr->endclk = get_clock();
1907 list_del_init(&cqr->blocklist);
1908 __dasd_cleanup_cqr(cqr);
1729 } 1909 }
1730 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1731 dasd_schedule_bh(device);
1732 return rc; 1910 return rc;
1733} 1911}
1734 1912
1735/* 1913/*
1736 * SECTION: Block device operations (request queue, partitions, open, release). 1914 * Schedules a call to dasd_block_tasklet over the block tasklet.
1915 */
1916void dasd_schedule_block_bh(struct dasd_block *block)
1917{
1918 /* Protect against rescheduling. */
1919 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1920 return;
1921 /* life cycle of block is bound to its base device */
1922 dasd_get_device(block->base);
1923 tasklet_hi_schedule(&block->tasklet);
1924}
1925
1926
1927/*
1928 * SECTION: external block device operations
1929 * (request queue handling, open, release, etc.)
1737 */ 1930 */
1738 1931
1739/* 1932/*
1740 * Dasd request queue function. Called from ll_rw_blk.c 1933 * Dasd request queue function. Called from ll_rw_blk.c
1741 */ 1934 */
1742static void 1935static void do_dasd_request(struct request_queue *queue)
1743do_dasd_request(struct request_queue * queue)
1744{ 1936{
1745 struct dasd_device *device; 1937 struct dasd_block *block;
1746 1938
1747 device = (struct dasd_device *) queue->queuedata; 1939 block = queue->queuedata;
1748 spin_lock(get_ccwdev_lock(device->cdev)); 1940 spin_lock(&block->queue_lock);
1749 /* Get new request from the block device request queue */ 1941 /* Get new request from the block device request queue */
1750 __dasd_process_blk_queue(device); 1942 __dasd_process_request_queue(block);
1751 /* Now check if the head of the ccw queue needs to be started. */ 1943 /* Now check if the head of the ccw queue needs to be started. */
1752 __dasd_start_head(device); 1944 __dasd_block_start_head(block);
1753 spin_unlock(get_ccwdev_lock(device->cdev)); 1945 spin_unlock(&block->queue_lock);
1754} 1946}
1755 1947
1756/* 1948/*
1757 * Allocate and initialize request queue and default I/O scheduler. 1949 * Allocate and initialize request queue and default I/O scheduler.
1758 */ 1950 */
1759static int 1951static int dasd_alloc_queue(struct dasd_block *block)
1760dasd_alloc_queue(struct dasd_device * device)
1761{ 1952{
1762 int rc; 1953 int rc;
1763 1954
1764 device->request_queue = blk_init_queue(do_dasd_request, 1955 block->request_queue = blk_init_queue(do_dasd_request,
1765 &device->request_queue_lock); 1956 &block->request_queue_lock);
1766 if (device->request_queue == NULL) 1957 if (block->request_queue == NULL)
1767 return -ENOMEM; 1958 return -ENOMEM;
1768 1959
1769 device->request_queue->queuedata = device; 1960 block->request_queue->queuedata = block;
1770 1961
1771 elevator_exit(device->request_queue->elevator); 1962 elevator_exit(block->request_queue->elevator);
1772 rc = elevator_init(device->request_queue, "deadline"); 1963 rc = elevator_init(block->request_queue, "deadline");
1773 if (rc) { 1964 if (rc) {
1774 blk_cleanup_queue(device->request_queue); 1965 blk_cleanup_queue(block->request_queue);
1775 return rc; 1966 return rc;
1776 } 1967 }
1777 return 0; 1968 return 0;
@@ -1780,79 +1971,76 @@ dasd_alloc_queue(struct dasd_device * device)
1780/* 1971/*
1781 * Allocate and initialize request queue. 1972 * Allocate and initialize request queue.
1782 */ 1973 */
1783static void 1974static void dasd_setup_queue(struct dasd_block *block)
1784dasd_setup_queue(struct dasd_device * device)
1785{ 1975{
1786 int max; 1976 int max;
1787 1977
1788 blk_queue_hardsect_size(device->request_queue, device->bp_block); 1978 blk_queue_hardsect_size(block->request_queue, block->bp_block);
1789 max = device->discipline->max_blocks << device->s2b_shift; 1979 max = block->base->discipline->max_blocks << block->s2b_shift;
1790 blk_queue_max_sectors(device->request_queue, max); 1980 blk_queue_max_sectors(block->request_queue, max);
1791 blk_queue_max_phys_segments(device->request_queue, -1L); 1981 blk_queue_max_phys_segments(block->request_queue, -1L);
1792 blk_queue_max_hw_segments(device->request_queue, -1L); 1982 blk_queue_max_hw_segments(block->request_queue, -1L);
1793 blk_queue_max_segment_size(device->request_queue, -1L); 1983 blk_queue_max_segment_size(block->request_queue, -1L);
1794 blk_queue_segment_boundary(device->request_queue, -1L); 1984 blk_queue_segment_boundary(block->request_queue, -1L);
1795 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); 1985 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1796} 1986}
1797 1987
1798/* 1988/*
1799 * Deactivate and free request queue. 1989 * Deactivate and free request queue.
1800 */ 1990 */
1801static void 1991static void dasd_free_queue(struct dasd_block *block)
1802dasd_free_queue(struct dasd_device * device)
1803{ 1992{
1804 if (device->request_queue) { 1993 if (block->request_queue) {
1805 blk_cleanup_queue(device->request_queue); 1994 blk_cleanup_queue(block->request_queue);
1806 device->request_queue = NULL; 1995 block->request_queue = NULL;
1807 } 1996 }
1808} 1997}
1809 1998
1810/* 1999/*
1811 * Flush request on the request queue. 2000 * Flush request on the request queue.
1812 */ 2001 */
1813static void 2002static void dasd_flush_request_queue(struct dasd_block *block)
1814dasd_flush_request_queue(struct dasd_device * device)
1815{ 2003{
1816 struct request *req; 2004 struct request *req;
1817 2005
1818 if (!device->request_queue) 2006 if (!block->request_queue)
1819 return; 2007 return;
1820 2008
1821 spin_lock_irq(&device->request_queue_lock); 2009 spin_lock_irq(&block->request_queue_lock);
1822 while ((req = elv_next_request(device->request_queue))) { 2010 while ((req = elv_next_request(block->request_queue))) {
1823 blkdev_dequeue_request(req); 2011 blkdev_dequeue_request(req);
1824 dasd_end_request(req, 0); 2012 dasd_end_request(req, 0);
1825 } 2013 }
1826 spin_unlock_irq(&device->request_queue_lock); 2014 spin_unlock_irq(&block->request_queue_lock);
1827} 2015}
1828 2016
1829static int 2017static int dasd_open(struct inode *inp, struct file *filp)
1830dasd_open(struct inode *inp, struct file *filp)
1831{ 2018{
1832 struct gendisk *disk = inp->i_bdev->bd_disk; 2019 struct gendisk *disk = inp->i_bdev->bd_disk;
1833 struct dasd_device *device = disk->private_data; 2020 struct dasd_block *block = disk->private_data;
2021 struct dasd_device *base = block->base;
1834 int rc; 2022 int rc;
1835 2023
1836 atomic_inc(&device->open_count); 2024 atomic_inc(&block->open_count);
1837 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2025 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
1838 rc = -ENODEV; 2026 rc = -ENODEV;
1839 goto unlock; 2027 goto unlock;
1840 } 2028 }
1841 2029
1842 if (!try_module_get(device->discipline->owner)) { 2030 if (!try_module_get(base->discipline->owner)) {
1843 rc = -EINVAL; 2031 rc = -EINVAL;
1844 goto unlock; 2032 goto unlock;
1845 } 2033 }
1846 2034
1847 if (dasd_probeonly) { 2035 if (dasd_probeonly) {
1848 DEV_MESSAGE(KERN_INFO, device, "%s", 2036 DEV_MESSAGE(KERN_INFO, base, "%s",
1849 "No access to device due to probeonly mode"); 2037 "No access to device due to probeonly mode");
1850 rc = -EPERM; 2038 rc = -EPERM;
1851 goto out; 2039 goto out;
1852 } 2040 }
1853 2041
1854 if (device->state <= DASD_STATE_BASIC) { 2042 if (base->state <= DASD_STATE_BASIC) {
1855 DBF_DEV_EVENT(DBF_ERR, device, " %s", 2043 DBF_DEV_EVENT(DBF_ERR, base, " %s",
1856 " Cannot open unrecognized device"); 2044 " Cannot open unrecognized device");
1857 rc = -ENODEV; 2045 rc = -ENODEV;
1858 goto out; 2046 goto out;
@@ -1861,41 +2049,41 @@ dasd_open(struct inode *inp, struct file *filp)
1861 return 0; 2049 return 0;
1862 2050
1863out: 2051out:
1864 module_put(device->discipline->owner); 2052 module_put(base->discipline->owner);
1865unlock: 2053unlock:
1866 atomic_dec(&device->open_count); 2054 atomic_dec(&block->open_count);
1867 return rc; 2055 return rc;
1868} 2056}
1869 2057
1870static int 2058static int dasd_release(struct inode *inp, struct file *filp)
1871dasd_release(struct inode *inp, struct file *filp)
1872{ 2059{
1873 struct gendisk *disk = inp->i_bdev->bd_disk; 2060 struct gendisk *disk = inp->i_bdev->bd_disk;
1874 struct dasd_device *device = disk->private_data; 2061 struct dasd_block *block = disk->private_data;
1875 2062
1876 atomic_dec(&device->open_count); 2063 atomic_dec(&block->open_count);
1877 module_put(device->discipline->owner); 2064 module_put(block->base->discipline->owner);
1878 return 0; 2065 return 0;
1879} 2066}
1880 2067
1881/* 2068/*
1882 * Return disk geometry. 2069 * Return disk geometry.
1883 */ 2070 */
1884static int 2071static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1885dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1886{ 2072{
1887 struct dasd_device *device; 2073 struct dasd_block *block;
2074 struct dasd_device *base;
1888 2075
1889 device = bdev->bd_disk->private_data; 2076 block = bdev->bd_disk->private_data;
1890 if (!device) 2077 base = block->base;
2078 if (!block)
1891 return -ENODEV; 2079 return -ENODEV;
1892 2080
1893 if (!device->discipline || 2081 if (!base->discipline ||
1894 !device->discipline->fill_geometry) 2082 !base->discipline->fill_geometry)
1895 return -EINVAL; 2083 return -EINVAL;
1896 2084
1897 device->discipline->fill_geometry(device, geo); 2085 base->discipline->fill_geometry(block, geo);
1898 geo->start = get_start_sect(bdev) >> device->s2b_shift; 2086 geo->start = get_start_sect(bdev) >> block->s2b_shift;
1899 return 0; 2087 return 0;
1900} 2088}
1901 2089
@@ -1909,6 +2097,9 @@ dasd_device_operations = {
1909 .getgeo = dasd_getgeo, 2097 .getgeo = dasd_getgeo,
1910}; 2098};
1911 2099
2100/*******************************************************************************
2101 * end of block device operations
2102 */
1912 2103
1913static void 2104static void
1914dasd_exit(void) 2105dasd_exit(void)
@@ -1937,9 +2128,8 @@ dasd_exit(void)
1937 * Initial attempt at a probe function. this can be simplified once 2128 * Initial attempt at a probe function. This can be simplified once
1938 * the other detection code is gone. 2129 * the other detection code is gone.
1939 */ 2130 */
1940int 2131int dasd_generic_probe(struct ccw_device *cdev,
1941dasd_generic_probe (struct ccw_device *cdev, 2132 struct dasd_discipline *discipline)
1942 struct dasd_discipline *discipline)
1943{ 2133{
1944 int ret; 2134 int ret;
1945 2135
@@ -1969,19 +2159,20 @@ dasd_generic_probe (struct ccw_device *cdev,
1969 ret = ccw_device_set_online(cdev); 2159 ret = ccw_device_set_online(cdev);
1970 if (ret) 2160 if (ret)
1971 printk(KERN_WARNING 2161 printk(KERN_WARNING
1972 "dasd_generic_probe: could not initially online " 2162 "dasd_generic_probe: could not initially "
1973 "ccw-device %s\n", cdev->dev.bus_id); 2163 "online ccw-device %s; return code: %d\n",
1974 return ret; 2164 cdev->dev.bus_id, ret);
2165 return 0;
1975} 2166}
1976 2167
1977/* 2168/*
1978 * This will one day be called from a global not_oper handler. 2169 * This will one day be called from a global not_oper handler.
1979 * It is also used by driver_unregister during module unload. 2170 * It is also used by driver_unregister during module unload.
1980 */ 2171 */
1981void 2172void dasd_generic_remove(struct ccw_device *cdev)
1982dasd_generic_remove (struct ccw_device *cdev)
1983{ 2173{
1984 struct dasd_device *device; 2174 struct dasd_device *device;
2175 struct dasd_block *block;
1985 2176
1986 cdev->handler = NULL; 2177 cdev->handler = NULL;
1987 2178
@@ -2001,7 +2192,15 @@ dasd_generic_remove (struct ccw_device *cdev)
2001 */ 2192 */
2002 dasd_set_target_state(device, DASD_STATE_NEW); 2193 dasd_set_target_state(device, DASD_STATE_NEW);
2003 /* dasd_delete_device destroys the device reference. */ 2194 /* dasd_delete_device destroys the device reference. */
2195 block = device->block;
2196 device->block = NULL;
2004 dasd_delete_device(device); 2197 dasd_delete_device(device);
2198 /*
2199 * life cycle of block is bound to device, so delete it after
2200 * the device was safely removed
2201 */
2202 if (block)
2203 dasd_free_block(block);
2005} 2204}
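
Both remove and set_offline now detach the block from the device before dasd_delete_device() and free it afterwards; the ordering matters because the block's life cycle is bound to its base device. The sequence, condensed into an illustrative helper:

static void example_teardown(struct dasd_device *device)
{
	struct dasd_block *block = device->block;

	device->block = NULL;		/* detach before deleting the device */
	dasd_delete_device(device);	/* destroys the device reference */
	if (block)
		dasd_free_block(block);	/* safe only once the device is gone */
}
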
2006 2205
2007/* 2206/*
@@ -2009,10 +2208,8 @@ dasd_generic_remove (struct ccw_device *cdev)
2009 * the device is detected for the first time and is supposed to be used 2208 * the device is detected for the first time and is supposed to be used
2010 * or the user has started activation through sysfs. 2209 * or the user has started activation through sysfs.
2011 */ 2210 */
2012int 2211int dasd_generic_set_online(struct ccw_device *cdev,
2013dasd_generic_set_online (struct ccw_device *cdev, 2212 struct dasd_discipline *base_discipline)
2014 struct dasd_discipline *base_discipline)
2015
2016{ 2213{
2017 struct dasd_discipline *discipline; 2214 struct dasd_discipline *discipline;
2018 struct dasd_device *device; 2215 struct dasd_device *device;
@@ -2048,6 +2245,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
2048 device->base_discipline = base_discipline; 2245 device->base_discipline = base_discipline;
2049 device->discipline = discipline; 2246 device->discipline = discipline;
2050 2247
2248 /* check_device will allocate block device if necessary */
2051 rc = discipline->check_device(device); 2249 rc = discipline->check_device(device);
2052 if (rc) { 2250 if (rc) {
2053 printk (KERN_WARNING 2251 printk (KERN_WARNING
@@ -2067,6 +2265,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
2067 cdev->dev.bus_id); 2265 cdev->dev.bus_id);
2068 rc = -ENODEV; 2266 rc = -ENODEV;
2069 dasd_set_target_state(device, DASD_STATE_NEW); 2267 dasd_set_target_state(device, DASD_STATE_NEW);
2268 if (device->block)
2269 dasd_free_block(device->block);
2070 dasd_delete_device(device); 2270 dasd_delete_device(device);
2071 } else 2271 } else
2072 pr_debug("dasd_generic device %s found\n", 2272 pr_debug("dasd_generic device %s found\n",
@@ -2081,10 +2281,10 @@ dasd_generic_set_online (struct ccw_device *cdev,
2081 return rc; 2281 return rc;
2082} 2282}
2083 2283
2084int 2284int dasd_generic_set_offline(struct ccw_device *cdev)
2085dasd_generic_set_offline (struct ccw_device *cdev)
2086{ 2285{
2087 struct dasd_device *device; 2286 struct dasd_device *device;
2287 struct dasd_block *block;
2088 int max_count, open_count; 2288 int max_count, open_count;
2089 2289
2090 device = dasd_device_from_cdev(cdev); 2290 device = dasd_device_from_cdev(cdev);
@@ -2101,30 +2301,39 @@ dasd_generic_set_offline (struct ccw_device *cdev)
2101 * the blkdev_get in dasd_scan_partitions. We are only interested 2301 * the blkdev_get in dasd_scan_partitions. We are only interested
2102 * in the other openers. 2302 * in the other openers.
2103 */ 2303 */
2104 max_count = device->bdev ? 0 : -1; 2304 if (device->block) {
2105 open_count = (int) atomic_read(&device->open_count); 2305 struct dasd_block *block = device->block;
2106 if (open_count > max_count) { 2306 max_count = block->bdev ? 0 : -1;
2107 if (open_count > 0) 2307 open_count = (int) atomic_read(&block->open_count);
2108 printk (KERN_WARNING "Can't offline dasd device with " 2308 if (open_count > max_count) {
2109 "open count = %i.\n", 2309 if (open_count > 0)
2110 open_count); 2310 printk(KERN_WARNING "Can't offline dasd "
2111 else 2311 "device with open count = %i.\n",
2112 printk (KERN_WARNING "%s", 2312 open_count);
2113 "Can't offline dasd device due to internal " 2313 else
2114 "use\n"); 2314 printk(KERN_WARNING "%s",
2115 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2315 "Can't offline dasd device due "
2116 dasd_put_device(device); 2316 "to internal use\n");
2117 return -EBUSY; 2317 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2318 dasd_put_device(device);
2319 return -EBUSY;
2320 }
2118 } 2321 }
2119 dasd_set_target_state(device, DASD_STATE_NEW); 2322 dasd_set_target_state(device, DASD_STATE_NEW);
2120 /* dasd_delete_device destroys the device reference. */ 2323 /* dasd_delete_device destroys the device reference. */
2324 block = device->block;
2325 device->block = NULL;
2121 dasd_delete_device(device); 2326 dasd_delete_device(device);
2122 2327 /*
2328 * life cycle of block is bound to device, so delete it after
2329 * the device was safely removed
2330 */
2331 if (block)
2332 dasd_free_block(block);
2123 return 0; 2333 return 0;
2124} 2334}
2125 2335
2126int 2336int dasd_generic_notify(struct ccw_device *cdev, int event)
2127dasd_generic_notify(struct ccw_device *cdev, int event)
2128{ 2337{
2129 struct dasd_device *device; 2338 struct dasd_device *device;
2130 struct dasd_ccw_req *cqr; 2339 struct dasd_ccw_req *cqr;
@@ -2145,27 +2354,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
2145 if (device->state < DASD_STATE_BASIC) 2354 if (device->state < DASD_STATE_BASIC)
2146 break; 2355 break;
2147 /* Device is active. We want to keep it. */ 2356 /* Device is active. We want to keep it. */
2148 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) { 2357 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2149 list_for_each_entry(cqr, &device->ccw_queue, list) 2358 if (cqr->status == DASD_CQR_IN_IO) {
2150 if (cqr->status == DASD_CQR_IN_IO) 2359 cqr->status = DASD_CQR_QUEUED;
2151 cqr->status = DASD_CQR_FAILED; 2360 cqr->retries++;
2152 device->stopped |= DASD_STOPPED_DC_EIO; 2361 }
2153 } else { 2362 device->stopped |= DASD_STOPPED_DC_WAIT;
2154 list_for_each_entry(cqr, &device->ccw_queue, list) 2363 dasd_device_clear_timer(device);
2155 if (cqr->status == DASD_CQR_IN_IO) { 2364 dasd_schedule_device_bh(device);
2156 cqr->status = DASD_CQR_QUEUED;
2157 cqr->retries++;
2158 }
2159 device->stopped |= DASD_STOPPED_DC_WAIT;
2160 dasd_set_timer(device, 0);
2161 }
2162 dasd_schedule_bh(device);
2163 ret = 1; 2365 ret = 1;
2164 break; 2366 break;
2165 case CIO_OPER: 2367 case CIO_OPER:
2166 /* FIXME: add a sanity check. */ 2368 /* FIXME: add a sanity check. */
2167 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO); 2369 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2168 dasd_schedule_bh(device); 2370 dasd_schedule_device_bh(device);
2371 if (device->block)
2372 dasd_schedule_block_bh(device->block);
2169 ret = 1; 2373 ret = 1;
2170 break; 2374 break;
2171 } 2375 }
@@ -2195,7 +2399,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2195 ccw->cda = (__u32)(addr_t)rdc_buffer; 2399 ccw->cda = (__u32)(addr_t)rdc_buffer;
2196 ccw->count = rdc_buffer_size; 2400 ccw->count = rdc_buffer_size;
2197 2401
2198 cqr->device = device; 2402 cqr->startdev = device;
2403 cqr->memdev = device;
2199 cqr->expires = 10*HZ; 2404 cqr->expires = 10*HZ;
2200 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2405 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2201 cqr->retries = 2; 2406 cqr->retries = 2;
@@ -2217,13 +2422,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2217 return PTR_ERR(cqr); 2422 return PTR_ERR(cqr);
2218 2423
2219 ret = dasd_sleep_on(cqr); 2424 ret = dasd_sleep_on(cqr);
2220 dasd_sfree_request(cqr, cqr->device); 2425 dasd_sfree_request(cqr, cqr->memdev);
2221 return ret; 2426 return ret;
2222} 2427}
2223EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2428EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2224 2429
2225static int __init 2430static int __init dasd_init(void)
2226dasd_init(void)
2227{ 2431{
2228 int rc; 2432 int rc;
2229 2433
@@ -2231,7 +2435,7 @@ dasd_init(void)
2231 init_waitqueue_head(&dasd_flush_wq); 2435 init_waitqueue_head(&dasd_flush_wq);
2232 2436
2233 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2437 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2234 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); 2438 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2235 if (dasd_debug_area == NULL) { 2439 if (dasd_debug_area == NULL) {
2236 rc = -ENOMEM; 2440 rc = -ENOMEM;
2237 goto failed; 2441 goto failed;
@@ -2277,15 +2481,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2277EXPORT_SYMBOL(dasd_add_request_head); 2481EXPORT_SYMBOL(dasd_add_request_head);
2278EXPORT_SYMBOL(dasd_add_request_tail); 2482EXPORT_SYMBOL(dasd_add_request_tail);
2279EXPORT_SYMBOL(dasd_cancel_req); 2483EXPORT_SYMBOL(dasd_cancel_req);
2280EXPORT_SYMBOL(dasd_clear_timer); 2484EXPORT_SYMBOL(dasd_device_clear_timer);
2485EXPORT_SYMBOL(dasd_block_clear_timer);
2281EXPORT_SYMBOL(dasd_enable_device); 2486EXPORT_SYMBOL(dasd_enable_device);
2282EXPORT_SYMBOL(dasd_int_handler); 2487EXPORT_SYMBOL(dasd_int_handler);
2283EXPORT_SYMBOL(dasd_kfree_request); 2488EXPORT_SYMBOL(dasd_kfree_request);
2284EXPORT_SYMBOL(dasd_kick_device); 2489EXPORT_SYMBOL(dasd_kick_device);
2285EXPORT_SYMBOL(dasd_kmalloc_request); 2490EXPORT_SYMBOL(dasd_kmalloc_request);
2286EXPORT_SYMBOL(dasd_schedule_bh); 2491EXPORT_SYMBOL(dasd_schedule_device_bh);
2492EXPORT_SYMBOL(dasd_schedule_block_bh);
2287EXPORT_SYMBOL(dasd_set_target_state); 2493EXPORT_SYMBOL(dasd_set_target_state);
2288EXPORT_SYMBOL(dasd_set_timer); 2494EXPORT_SYMBOL(dasd_device_set_timer);
2495EXPORT_SYMBOL(dasd_block_set_timer);
2289EXPORT_SYMBOL(dasd_sfree_request); 2496EXPORT_SYMBOL(dasd_sfree_request);
2290EXPORT_SYMBOL(dasd_sleep_on); 2497EXPORT_SYMBOL(dasd_sleep_on);
2291EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2498EXPORT_SYMBOL(dasd_sleep_on_immediatly);
@@ -2299,4 +2506,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
2299EXPORT_SYMBOL_GPL(dasd_generic_notify); 2506EXPORT_SYMBOL_GPL(dasd_generic_notify);
2300EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2507EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2301EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2508EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2302 2509EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2510EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2511EXPORT_SYMBOL_GPL(dasd_alloc_block);
2512EXPORT_SYMBOL_GPL(dasd_free_block);