aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/block
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/block')
-rw-r--r--drivers/s390/block/Makefile4
-rw-r--r--drivers/s390/block/dasd.c1680
-rw-r--r--drivers/s390/block/dasd_3370_erp.c84
-rw-r--r--drivers/s390/block/dasd_3990_erp.c358
-rw-r--r--drivers/s390/block/dasd_9336_erp.c41
-rw-r--r--drivers/s390/block/dasd_9343_erp.c21
-rw-r--r--drivers/s390/block/dasd_alias.c903
-rw-r--r--drivers/s390/block/dasd_devmap.c94
-rw-r--r--drivers/s390/block/dasd_diag.c107
-rw-r--r--drivers/s390/block/dasd_eckd.c787
-rw-r--r--drivers/s390/block/dasd_eckd.h125
-rw-r--r--drivers/s390/block/dasd_eer.c11
-rw-r--r--drivers/s390/block/dasd_erp.c25
-rw-r--r--drivers/s390/block/dasd_fba.c119
-rw-r--r--drivers/s390/block/dasd_genhd.c76
-rw-r--r--drivers/s390/block/dasd_int.h209
-rw-r--r--drivers/s390/block/dasd_ioctl.c172
-rw-r--r--drivers/s390/block/dasd_proc.c21
18 files changed, 3117 insertions, 1720 deletions
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index be9f22d52fd8..0a89e080b389 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -2,8 +2,8 @@
2# S/390 block devices 2# S/390 block devices
3# 3#
4 4
5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o 5dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_alias.o
6dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o 6dasd_fba_mod-objs := dasd_fba.o
7dasd_diag_mod-objs := dasd_diag.o 7dasd_diag_mod-objs := dasd_diag.o
8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ 8dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
9 dasd_genhd.o dasd_erp.o 9 dasd_genhd.o dasd_erp.o
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index da4fe1ecef9e..db9193d38986 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -48,13 +48,15 @@ MODULE_LICENSE("GPL");
48/* 48/*
49 * SECTION: prototypes for static functions of dasd.c 49 * SECTION: prototypes for static functions of dasd.c
50 */ 50 */
51static int dasd_alloc_queue(struct dasd_device * device); 51static int dasd_alloc_queue(struct dasd_block *);
52static void dasd_setup_queue(struct dasd_device * device); 52static void dasd_setup_queue(struct dasd_block *);
53static void dasd_free_queue(struct dasd_device * device); 53static void dasd_free_queue(struct dasd_block *);
54static void dasd_flush_request_queue(struct dasd_device *); 54static void dasd_flush_request_queue(struct dasd_block *);
55static int dasd_flush_ccw_queue(struct dasd_device *, int); 55static int dasd_flush_block_queue(struct dasd_block *);
56static void dasd_tasklet(struct dasd_device *); 56static void dasd_device_tasklet(struct dasd_device *);
57static void dasd_block_tasklet(struct dasd_block *);
57static void do_kick_device(struct work_struct *); 58static void do_kick_device(struct work_struct *);
59static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
58 60
59/* 61/*
60 * SECTION: Operations on the device structure. 62 * SECTION: Operations on the device structure.
@@ -65,26 +67,23 @@ static wait_queue_head_t dasd_flush_wq;
65/* 67/*
66 * Allocate memory for a new device structure. 68 * Allocate memory for a new device structure.
67 */ 69 */
68struct dasd_device * 70struct dasd_device *dasd_alloc_device(void)
69dasd_alloc_device(void)
70{ 71{
71 struct dasd_device *device; 72 struct dasd_device *device;
72 73
73 device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC); 74 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
74 if (device == NULL) 75 if (!device)
75 return ERR_PTR(-ENOMEM); 76 return ERR_PTR(-ENOMEM);
76 /* open_count = 0 means device online but not in use */
77 atomic_set(&device->open_count, -1);
78 77
79 /* Get two pages for normal block device operations. */ 78 /* Get two pages for normal block device operations. */
80 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 79 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
81 if (device->ccw_mem == NULL) { 80 if (!device->ccw_mem) {
82 kfree(device); 81 kfree(device);
83 return ERR_PTR(-ENOMEM); 82 return ERR_PTR(-ENOMEM);
84 } 83 }
85 /* Get one page for error recovery. */ 84 /* Get one page for error recovery. */
86 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 85 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
87 if (device->erp_mem == NULL) { 86 if (!device->erp_mem) {
88 free_pages((unsigned long) device->ccw_mem, 1); 87 free_pages((unsigned long) device->ccw_mem, 1);
89 kfree(device); 88 kfree(device);
90 return ERR_PTR(-ENOMEM); 89 return ERR_PTR(-ENOMEM);
@@ -93,10 +92,9 @@ dasd_alloc_device(void)
93 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 92 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
94 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 93 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
95 spin_lock_init(&device->mem_lock); 94 spin_lock_init(&device->mem_lock);
96 spin_lock_init(&device->request_queue_lock); 95 atomic_set(&device->tasklet_scheduled, 0);
97 atomic_set (&device->tasklet_scheduled, 0);
98 tasklet_init(&device->tasklet, 96 tasklet_init(&device->tasklet,
99 (void (*)(unsigned long)) dasd_tasklet, 97 (void (*)(unsigned long)) dasd_device_tasklet,
100 (unsigned long) device); 98 (unsigned long) device);
101 INIT_LIST_HEAD(&device->ccw_queue); 99 INIT_LIST_HEAD(&device->ccw_queue);
102 init_timer(&device->timer); 100 init_timer(&device->timer);
@@ -110,8 +108,7 @@ dasd_alloc_device(void)
110/* 108/*
111 * Free memory of a device structure. 109 * Free memory of a device structure.
112 */ 110 */
113void 111void dasd_free_device(struct dasd_device *device)
114dasd_free_device(struct dasd_device *device)
115{ 112{
116 kfree(device->private); 113 kfree(device->private);
117 free_page((unsigned long) device->erp_mem); 114 free_page((unsigned long) device->erp_mem);
@@ -120,10 +117,42 @@ dasd_free_device(struct dasd_device *device)
120} 117}
121 118
122/* 119/*
120 * Allocate memory for a new device structure.
121 */
122struct dasd_block *dasd_alloc_block(void)
123{
124 struct dasd_block *block;
125
126 block = kzalloc(sizeof(*block), GFP_ATOMIC);
127 if (!block)
128 return ERR_PTR(-ENOMEM);
129 /* open_count = 0 means device online but not in use */
130 atomic_set(&block->open_count, -1);
131
132 spin_lock_init(&block->request_queue_lock);
133 atomic_set(&block->tasklet_scheduled, 0);
134 tasklet_init(&block->tasklet,
135 (void (*)(unsigned long)) dasd_block_tasklet,
136 (unsigned long) block);
137 INIT_LIST_HEAD(&block->ccw_queue);
138 spin_lock_init(&block->queue_lock);
139 init_timer(&block->timer);
140
141 return block;
142}
143
144/*
145 * Free memory of a device structure.
146 */
147void dasd_free_block(struct dasd_block *block)
148{
149 kfree(block);
150}
151
152/*
123 * Make a new device known to the system. 153 * Make a new device known to the system.
124 */ 154 */
125static int 155static int dasd_state_new_to_known(struct dasd_device *device)
126dasd_state_new_to_known(struct dasd_device *device)
127{ 156{
128 int rc; 157 int rc;
129 158
@@ -133,12 +162,13 @@ dasd_state_new_to_known(struct dasd_device *device)
133 */ 162 */
134 dasd_get_device(device); 163 dasd_get_device(device);
135 164
136 rc = dasd_alloc_queue(device); 165 if (device->block) {
137 if (rc) { 166 rc = dasd_alloc_queue(device->block);
138 dasd_put_device(device); 167 if (rc) {
139 return rc; 168 dasd_put_device(device);
169 return rc;
170 }
140 } 171 }
141
142 device->state = DASD_STATE_KNOWN; 172 device->state = DASD_STATE_KNOWN;
143 return 0; 173 return 0;
144} 174}
@@ -146,21 +176,24 @@ dasd_state_new_to_known(struct dasd_device *device)
146/* 176/*
147 * Let the system forget about a device. 177 * Let the system forget about a device.
148 */ 178 */
149static int 179static int dasd_state_known_to_new(struct dasd_device *device)
150dasd_state_known_to_new(struct dasd_device * device)
151{ 180{
152 /* Disable extended error reporting for this device. */ 181 /* Disable extended error reporting for this device. */
153 dasd_eer_disable(device); 182 dasd_eer_disable(device);
154 /* Forget the discipline information. */ 183 /* Forget the discipline information. */
155 if (device->discipline) 184 if (device->discipline) {
185 if (device->discipline->uncheck_device)
186 device->discipline->uncheck_device(device);
156 module_put(device->discipline->owner); 187 module_put(device->discipline->owner);
188 }
157 device->discipline = NULL; 189 device->discipline = NULL;
158 if (device->base_discipline) 190 if (device->base_discipline)
159 module_put(device->base_discipline->owner); 191 module_put(device->base_discipline->owner);
160 device->base_discipline = NULL; 192 device->base_discipline = NULL;
161 device->state = DASD_STATE_NEW; 193 device->state = DASD_STATE_NEW;
162 194
163 dasd_free_queue(device); 195 if (device->block)
196 dasd_free_queue(device->block);
164 197
165 /* Give up reference we took in dasd_state_new_to_known. */ 198 /* Give up reference we took in dasd_state_new_to_known. */
166 dasd_put_device(device); 199 dasd_put_device(device);
@@ -170,19 +203,19 @@ dasd_state_known_to_new(struct dasd_device * device)
170/* 203/*
171 * Request the irq line for the device. 204 * Request the irq line for the device.
172 */ 205 */
173static int 206static int dasd_state_known_to_basic(struct dasd_device *device)
174dasd_state_known_to_basic(struct dasd_device * device)
175{ 207{
176 int rc; 208 int rc;
177 209
178 /* Allocate and register gendisk structure. */ 210 /* Allocate and register gendisk structure. */
179 rc = dasd_gendisk_alloc(device); 211 if (device->block) {
180 if (rc) 212 rc = dasd_gendisk_alloc(device->block);
181 return rc; 213 if (rc)
182 214 return rc;
215 }
183 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 216 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
184 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 217 device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
185 8 * sizeof (long)); 218 8 * sizeof(long));
186 debug_register_view(device->debug_area, &debug_sprintf_view); 219 debug_register_view(device->debug_area, &debug_sprintf_view);
187 debug_set_level(device->debug_area, DBF_WARNING); 220 debug_set_level(device->debug_area, DBF_WARNING);
188 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 221 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
@@ -194,16 +227,17 @@ dasd_state_known_to_basic(struct dasd_device * device)
194/* 227/*
195 * Release the irq line for the device. Terminate any running i/o. 228 * Release the irq line for the device. Terminate any running i/o.
196 */ 229 */
197static int 230static int dasd_state_basic_to_known(struct dasd_device *device)
198dasd_state_basic_to_known(struct dasd_device * device)
199{ 231{
200 int rc; 232 int rc;
201 233 if (device->block) {
202 dasd_gendisk_free(device); 234 dasd_gendisk_free(device->block);
203 rc = dasd_flush_ccw_queue(device, 1); 235 dasd_block_clear_timer(device->block);
236 }
237 rc = dasd_flush_device_queue(device);
204 if (rc) 238 if (rc)
205 return rc; 239 return rc;
206 dasd_clear_timer(device); 240 dasd_device_clear_timer(device);
207 241
208 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 242 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
209 if (device->debug_area != NULL) { 243 if (device->debug_area != NULL) {
@@ -228,26 +262,32 @@ dasd_state_basic_to_known(struct dasd_device * device)
228 * In case the analysis returns an error, the device setup is stopped 262 * In case the analysis returns an error, the device setup is stopped
229 * (a fake disk was already added to allow formatting). 263 * (a fake disk was already added to allow formatting).
230 */ 264 */
231static int 265static int dasd_state_basic_to_ready(struct dasd_device *device)
232dasd_state_basic_to_ready(struct dasd_device * device)
233{ 266{
234 int rc; 267 int rc;
268 struct dasd_block *block;
235 269
236 rc = 0; 270 rc = 0;
237 if (device->discipline->do_analysis != NULL) 271 block = device->block;
238 rc = device->discipline->do_analysis(device);
239 if (rc) {
240 if (rc != -EAGAIN)
241 device->state = DASD_STATE_UNFMT;
242 return rc;
243 }
244 /* make disk known with correct capacity */ 272 /* make disk known with correct capacity */
245 dasd_setup_queue(device); 273 if (block) {
246 set_capacity(device->gdp, device->blocks << device->s2b_shift); 274 if (block->base->discipline->do_analysis != NULL)
247 device->state = DASD_STATE_READY; 275 rc = block->base->discipline->do_analysis(block);
248 rc = dasd_scan_partitions(device); 276 if (rc) {
249 if (rc) 277 if (rc != -EAGAIN)
250 device->state = DASD_STATE_BASIC; 278 device->state = DASD_STATE_UNFMT;
279 return rc;
280 }
281 dasd_setup_queue(block);
282 set_capacity(block->gdp,
283 block->blocks << block->s2b_shift);
284 device->state = DASD_STATE_READY;
285 rc = dasd_scan_partitions(block);
286 if (rc)
287 device->state = DASD_STATE_BASIC;
288 } else {
289 device->state = DASD_STATE_READY;
290 }
251 return rc; 291 return rc;
252} 292}
253 293
@@ -256,28 +296,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
256 * Forget format information. Check if the target level is basic 296 * Forget format information. Check if the target level is basic
257 * and if it is create fake disk for formatting. 297 * and if it is create fake disk for formatting.
258 */ 298 */
259static int 299static int dasd_state_ready_to_basic(struct dasd_device *device)
260dasd_state_ready_to_basic(struct dasd_device * device)
261{ 300{
262 int rc; 301 int rc;
263 302
264 rc = dasd_flush_ccw_queue(device, 0);
265 if (rc)
266 return rc;
267 dasd_destroy_partitions(device);
268 dasd_flush_request_queue(device);
269 device->blocks = 0;
270 device->bp_block = 0;
271 device->s2b_shift = 0;
272 device->state = DASD_STATE_BASIC; 303 device->state = DASD_STATE_BASIC;
304 if (device->block) {
305 struct dasd_block *block = device->block;
306 rc = dasd_flush_block_queue(block);
307 if (rc) {
308 device->state = DASD_STATE_READY;
309 return rc;
310 }
311 dasd_destroy_partitions(block);
312 dasd_flush_request_queue(block);
313 block->blocks = 0;
314 block->bp_block = 0;
315 block->s2b_shift = 0;
316 }
273 return 0; 317 return 0;
274} 318}
275 319
276/* 320/*
277 * Back to basic. 321 * Back to basic.
278 */ 322 */
279static int 323static int dasd_state_unfmt_to_basic(struct dasd_device *device)
280dasd_state_unfmt_to_basic(struct dasd_device * device)
281{ 324{
282 device->state = DASD_STATE_BASIC; 325 device->state = DASD_STATE_BASIC;
283 return 0; 326 return 0;
@@ -291,17 +334,31 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
291static int 334static int
292dasd_state_ready_to_online(struct dasd_device * device) 335dasd_state_ready_to_online(struct dasd_device * device)
293{ 336{
337 int rc;
338
339 if (device->discipline->ready_to_online) {
340 rc = device->discipline->ready_to_online(device);
341 if (rc)
342 return rc;
343 }
294 device->state = DASD_STATE_ONLINE; 344 device->state = DASD_STATE_ONLINE;
295 dasd_schedule_bh(device); 345 if (device->block)
346 dasd_schedule_block_bh(device->block);
296 return 0; 347 return 0;
297} 348}
298 349
299/* 350/*
300 * Stop the requeueing of requests again. 351 * Stop the requeueing of requests again.
301 */ 352 */
302static int 353static int dasd_state_online_to_ready(struct dasd_device *device)
303dasd_state_online_to_ready(struct dasd_device * device)
304{ 354{
355 int rc;
356
357 if (device->discipline->online_to_ready) {
358 rc = device->discipline->online_to_ready(device);
359 if (rc)
360 return rc;
361 }
305 device->state = DASD_STATE_READY; 362 device->state = DASD_STATE_READY;
306 return 0; 363 return 0;
307} 364}
@@ -309,8 +366,7 @@ dasd_state_online_to_ready(struct dasd_device * device)
309/* 366/*
310 * Device startup state changes. 367 * Device startup state changes.
311 */ 368 */
312static int 369static int dasd_increase_state(struct dasd_device *device)
313dasd_increase_state(struct dasd_device *device)
314{ 370{
315 int rc; 371 int rc;
316 372
@@ -345,8 +401,7 @@ dasd_increase_state(struct dasd_device *device)
345/* 401/*
346 * Device shutdown state changes. 402 * Device shutdown state changes.
347 */ 403 */
348static int 404static int dasd_decrease_state(struct dasd_device *device)
349dasd_decrease_state(struct dasd_device *device)
350{ 405{
351 int rc; 406 int rc;
352 407
@@ -381,8 +436,7 @@ dasd_decrease_state(struct dasd_device *device)
381/* 436/*
382 * This is the main startup/shutdown routine. 437 * This is the main startup/shutdown routine.
383 */ 438 */
384static void 439static void dasd_change_state(struct dasd_device *device)
385dasd_change_state(struct dasd_device *device)
386{ 440{
387 int rc; 441 int rc;
388 442
@@ -409,17 +463,15 @@ dasd_change_state(struct dasd_device *device)
409 * dasd_kick_device will schedule a call do do_kick_device to the kernel 463 * dasd_kick_device will schedule a call do do_kick_device to the kernel
410 * event daemon. 464 * event daemon.
411 */ 465 */
412static void 466static void do_kick_device(struct work_struct *work)
413do_kick_device(struct work_struct *work)
414{ 467{
415 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 468 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
416 dasd_change_state(device); 469 dasd_change_state(device);
417 dasd_schedule_bh(device); 470 dasd_schedule_device_bh(device);
418 dasd_put_device(device); 471 dasd_put_device(device);
419} 472}
420 473
421void 474void dasd_kick_device(struct dasd_device *device)
422dasd_kick_device(struct dasd_device *device)
423{ 475{
424 dasd_get_device(device); 476 dasd_get_device(device);
425 /* queue call to dasd_kick_device to the kernel event daemon. */ 477 /* queue call to dasd_kick_device to the kernel event daemon. */
@@ -429,8 +481,7 @@ dasd_kick_device(struct dasd_device *device)
429/* 481/*
430 * Set the target state for a device and starts the state change. 482 * Set the target state for a device and starts the state change.
431 */ 483 */
432void 484void dasd_set_target_state(struct dasd_device *device, int target)
433dasd_set_target_state(struct dasd_device *device, int target)
434{ 485{
435 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 486 /* If we are in probeonly mode stop at DASD_STATE_READY. */
436 if (dasd_probeonly && target > DASD_STATE_READY) 487 if (dasd_probeonly && target > DASD_STATE_READY)
@@ -447,14 +498,12 @@ dasd_set_target_state(struct dasd_device *device, int target)
447/* 498/*
448 * Enable devices with device numbers in [from..to]. 499 * Enable devices with device numbers in [from..to].
449 */ 500 */
450static inline int 501static inline int _wait_for_device(struct dasd_device *device)
451_wait_for_device(struct dasd_device *device)
452{ 502{
453 return (device->state == device->target); 503 return (device->state == device->target);
454} 504}
455 505
456void 506void dasd_enable_device(struct dasd_device *device)
457dasd_enable_device(struct dasd_device *device)
458{ 507{
459 dasd_set_target_state(device, DASD_STATE_ONLINE); 508 dasd_set_target_state(device, DASD_STATE_ONLINE);
460 if (device->state <= DASD_STATE_KNOWN) 509 if (device->state <= DASD_STATE_KNOWN)
@@ -475,20 +524,20 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF;
475/* 524/*
476 * Increments counter in global and local profiling structures. 525 * Increments counter in global and local profiling structures.
477 */ 526 */
478#define dasd_profile_counter(value, counter, device) \ 527#define dasd_profile_counter(value, counter, block) \
479{ \ 528{ \
480 int index; \ 529 int index; \
481 for (index = 0; index < 31 && value >> (2+index); index++); \ 530 for (index = 0; index < 31 && value >> (2+index); index++); \
482 dasd_global_profile.counter[index]++; \ 531 dasd_global_profile.counter[index]++; \
483 device->profile.counter[index]++; \ 532 block->profile.counter[index]++; \
484} 533}
485 534
486/* 535/*
487 * Add profiling information for cqr before execution. 536 * Add profiling information for cqr before execution.
488 */ 537 */
489static void 538static void dasd_profile_start(struct dasd_block *block,
490dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, 539 struct dasd_ccw_req *cqr,
491 struct request *req) 540 struct request *req)
492{ 541{
493 struct list_head *l; 542 struct list_head *l;
494 unsigned int counter; 543 unsigned int counter;
@@ -498,19 +547,19 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
498 547
499 /* count the length of the chanq for statistics */ 548 /* count the length of the chanq for statistics */
500 counter = 0; 549 counter = 0;
501 list_for_each(l, &device->ccw_queue) 550 list_for_each(l, &block->ccw_queue)
502 if (++counter >= 31) 551 if (++counter >= 31)
503 break; 552 break;
504 dasd_global_profile.dasd_io_nr_req[counter]++; 553 dasd_global_profile.dasd_io_nr_req[counter]++;
505 device->profile.dasd_io_nr_req[counter]++; 554 block->profile.dasd_io_nr_req[counter]++;
506} 555}
507 556
508/* 557/*
509 * Add profiling information for cqr after execution. 558 * Add profiling information for cqr after execution.
510 */ 559 */
511static void 560static void dasd_profile_end(struct dasd_block *block,
512dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, 561 struct dasd_ccw_req *cqr,
513 struct request *req) 562 struct request *req)
514{ 563{
515 long strtime, irqtime, endtime, tottime; /* in microseconds */ 564 long strtime, irqtime, endtime, tottime; /* in microseconds */
516 long tottimeps, sectors; 565 long tottimeps, sectors;
@@ -532,27 +581,27 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
532 581
533 if (!dasd_global_profile.dasd_io_reqs) 582 if (!dasd_global_profile.dasd_io_reqs)
534 memset(&dasd_global_profile, 0, 583 memset(&dasd_global_profile, 0,
535 sizeof (struct dasd_profile_info_t)); 584 sizeof(struct dasd_profile_info_t));
536 dasd_global_profile.dasd_io_reqs++; 585 dasd_global_profile.dasd_io_reqs++;
537 dasd_global_profile.dasd_io_sects += sectors; 586 dasd_global_profile.dasd_io_sects += sectors;
538 587
539 if (!device->profile.dasd_io_reqs) 588 if (!block->profile.dasd_io_reqs)
540 memset(&device->profile, 0, 589 memset(&block->profile, 0,
541 sizeof (struct dasd_profile_info_t)); 590 sizeof(struct dasd_profile_info_t));
542 device->profile.dasd_io_reqs++; 591 block->profile.dasd_io_reqs++;
543 device->profile.dasd_io_sects += sectors; 592 block->profile.dasd_io_sects += sectors;
544 593
545 dasd_profile_counter(sectors, dasd_io_secs, device); 594 dasd_profile_counter(sectors, dasd_io_secs, block);
546 dasd_profile_counter(tottime, dasd_io_times, device); 595 dasd_profile_counter(tottime, dasd_io_times, block);
547 dasd_profile_counter(tottimeps, dasd_io_timps, device); 596 dasd_profile_counter(tottimeps, dasd_io_timps, block);
548 dasd_profile_counter(strtime, dasd_io_time1, device); 597 dasd_profile_counter(strtime, dasd_io_time1, block);
549 dasd_profile_counter(irqtime, dasd_io_time2, device); 598 dasd_profile_counter(irqtime, dasd_io_time2, block);
550 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device); 599 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
551 dasd_profile_counter(endtime, dasd_io_time3, device); 600 dasd_profile_counter(endtime, dasd_io_time3, block);
552} 601}
553#else 602#else
554#define dasd_profile_start(device, cqr, req) do {} while (0) 603#define dasd_profile_start(block, cqr, req) do {} while (0)
555#define dasd_profile_end(device, cqr, req) do {} while (0) 604#define dasd_profile_end(block, cqr, req) do {} while (0)
556#endif /* CONFIG_DASD_PROFILE */ 605#endif /* CONFIG_DASD_PROFILE */
557 606
558/* 607/*
@@ -562,9 +611,9 @@ dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
562 * memory and 2) dasd_smalloc_request uses the static ccw memory 611 * memory and 2) dasd_smalloc_request uses the static ccw memory
563 * that gets allocated for each device. 612 * that gets allocated for each device.
564 */ 613 */
565struct dasd_ccw_req * 614struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
566dasd_kmalloc_request(char *magic, int cplength, int datasize, 615 int datasize,
567 struct dasd_device * device) 616 struct dasd_device *device)
568{ 617{
569 struct dasd_ccw_req *cqr; 618 struct dasd_ccw_req *cqr;
570 619
@@ -600,9 +649,9 @@ dasd_kmalloc_request(char *magic, int cplength, int datasize,
600 return cqr; 649 return cqr;
601} 650}
602 651
603struct dasd_ccw_req * 652struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
604dasd_smalloc_request(char *magic, int cplength, int datasize, 653 int datasize,
605 struct dasd_device * device) 654 struct dasd_device *device)
606{ 655{
607 unsigned long flags; 656 unsigned long flags;
608 struct dasd_ccw_req *cqr; 657 struct dasd_ccw_req *cqr;
@@ -649,8 +698,7 @@ dasd_smalloc_request(char *magic, int cplength, int datasize,
649 * idal lists that might have been created by dasd_set_cda and the 698 * idal lists that might have been created by dasd_set_cda and the
650 * struct dasd_ccw_req itself. 699 * struct dasd_ccw_req itself.
651 */ 700 */
652void 701void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
653dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
654{ 702{
655#ifdef CONFIG_64BIT 703#ifdef CONFIG_64BIT
656 struct ccw1 *ccw; 704 struct ccw1 *ccw;
@@ -667,8 +715,7 @@ dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
667 dasd_put_device(device); 715 dasd_put_device(device);
668} 716}
669 717
670void 718void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
671dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
672{ 719{
673 unsigned long flags; 720 unsigned long flags;
674 721
@@ -681,14 +728,13 @@ dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
681/* 728/*
682 * Check discipline magic in cqr. 729 * Check discipline magic in cqr.
683 */ 730 */
684static inline int 731static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
685dasd_check_cqr(struct dasd_ccw_req *cqr)
686{ 732{
687 struct dasd_device *device; 733 struct dasd_device *device;
688 734
689 if (cqr == NULL) 735 if (cqr == NULL)
690 return -EINVAL; 736 return -EINVAL;
691 device = cqr->device; 737 device = cqr->startdev;
692 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 738 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
693 DEV_MESSAGE(KERN_WARNING, device, 739 DEV_MESSAGE(KERN_WARNING, device,
694 " dasd_ccw_req 0x%08x magic doesn't match" 740 " dasd_ccw_req 0x%08x magic doesn't match"
@@ -706,8 +752,7 @@ dasd_check_cqr(struct dasd_ccw_req *cqr)
706 * ccw_device_clear can fail if the i/o subsystem 752 * ccw_device_clear can fail if the i/o subsystem
707 * is in a bad mood. 753 * is in a bad mood.
708 */ 754 */
709int 755int dasd_term_IO(struct dasd_ccw_req *cqr)
710dasd_term_IO(struct dasd_ccw_req * cqr)
711{ 756{
712 struct dasd_device *device; 757 struct dasd_device *device;
713 int retries, rc; 758 int retries, rc;
@@ -717,13 +762,13 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
717 if (rc) 762 if (rc)
718 return rc; 763 return rc;
719 retries = 0; 764 retries = 0;
720 device = (struct dasd_device *) cqr->device; 765 device = (struct dasd_device *) cqr->startdev;
721 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 766 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
722 rc = ccw_device_clear(device->cdev, (long) cqr); 767 rc = ccw_device_clear(device->cdev, (long) cqr);
723 switch (rc) { 768 switch (rc) {
724 case 0: /* termination successful */ 769 case 0: /* termination successful */
725 cqr->retries--; 770 cqr->retries--;
726 cqr->status = DASD_CQR_CLEAR; 771 cqr->status = DASD_CQR_CLEAR_PENDING;
727 cqr->stopclk = get_clock(); 772 cqr->stopclk = get_clock();
728 cqr->starttime = 0; 773 cqr->starttime = 0;
729 DBF_DEV_EVENT(DBF_DEBUG, device, 774 DBF_DEV_EVENT(DBF_DEBUG, device,
@@ -753,7 +798,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
753 } 798 }
754 retries++; 799 retries++;
755 } 800 }
756 dasd_schedule_bh(device); 801 dasd_schedule_device_bh(device);
757 return rc; 802 return rc;
758} 803}
759 804
@@ -761,8 +806,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
761 * Start the i/o. This start_IO can fail if the channel is really busy. 806 * Start the i/o. This start_IO can fail if the channel is really busy.
762 * In that case set up a timer to start the request later. 807 * In that case set up a timer to start the request later.
763 */ 808 */
764int 809int dasd_start_IO(struct dasd_ccw_req *cqr)
765dasd_start_IO(struct dasd_ccw_req * cqr)
766{ 810{
767 struct dasd_device *device; 811 struct dasd_device *device;
768 int rc; 812 int rc;
@@ -771,12 +815,12 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
771 rc = dasd_check_cqr(cqr); 815 rc = dasd_check_cqr(cqr);
772 if (rc) 816 if (rc)
773 return rc; 817 return rc;
774 device = (struct dasd_device *) cqr->device; 818 device = (struct dasd_device *) cqr->startdev;
775 if (cqr->retries < 0) { 819 if (cqr->retries < 0) {
776 DEV_MESSAGE(KERN_DEBUG, device, 820 DEV_MESSAGE(KERN_DEBUG, device,
777 "start_IO: request %p (%02x/%i) - no retry left.", 821 "start_IO: request %p (%02x/%i) - no retry left.",
778 cqr, cqr->status, cqr->retries); 822 cqr, cqr->status, cqr->retries);
779 cqr->status = DASD_CQR_FAILED; 823 cqr->status = DASD_CQR_ERROR;
780 return -EIO; 824 return -EIO;
781 } 825 }
782 cqr->startclk = get_clock(); 826 cqr->startclk = get_clock();
@@ -833,8 +877,7 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
833 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 877 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
834 * DASD_CQR_QUEUED for 2) and 3). 878 * DASD_CQR_QUEUED for 2) and 3).
835 */ 879 */
836static void 880static void dasd_device_timeout(unsigned long ptr)
837dasd_timeout_device(unsigned long ptr)
838{ 881{
839 unsigned long flags; 882 unsigned long flags;
840 struct dasd_device *device; 883 struct dasd_device *device;
@@ -844,14 +887,13 @@ dasd_timeout_device(unsigned long ptr)
844 /* re-activate request queue */ 887 /* re-activate request queue */
845 device->stopped &= ~DASD_STOPPED_PENDING; 888 device->stopped &= ~DASD_STOPPED_PENDING;
846 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 889 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
847 dasd_schedule_bh(device); 890 dasd_schedule_device_bh(device);
848} 891}
849 892
850/* 893/*
851 * Setup timeout for a device in jiffies. 894 * Setup timeout for a device in jiffies.
852 */ 895 */
853void 896void dasd_device_set_timer(struct dasd_device *device, int expires)
854dasd_set_timer(struct dasd_device *device, int expires)
855{ 897{
856 if (expires == 0) { 898 if (expires == 0) {
857 if (timer_pending(&device->timer)) 899 if (timer_pending(&device->timer))
@@ -862,7 +904,7 @@ dasd_set_timer(struct dasd_device *device, int expires)
862 if (mod_timer(&device->timer, jiffies + expires)) 904 if (mod_timer(&device->timer, jiffies + expires))
863 return; 905 return;
864 } 906 }
865 device->timer.function = dasd_timeout_device; 907 device->timer.function = dasd_device_timeout;
866 device->timer.data = (unsigned long) device; 908 device->timer.data = (unsigned long) device;
867 device->timer.expires = jiffies + expires; 909 device->timer.expires = jiffies + expires;
868 add_timer(&device->timer); 910 add_timer(&device->timer);
@@ -871,15 +913,14 @@ dasd_set_timer(struct dasd_device *device, int expires)
871/* 913/*
872 * Clear timeout for a device. 914 * Clear timeout for a device.
873 */ 915 */
874void 916void dasd_device_clear_timer(struct dasd_device *device)
875dasd_clear_timer(struct dasd_device *device)
876{ 917{
877 if (timer_pending(&device->timer)) 918 if (timer_pending(&device->timer))
878 del_timer(&device->timer); 919 del_timer(&device->timer);
879} 920}
880 921
881static void 922static void dasd_handle_killed_request(struct ccw_device *cdev,
882dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) 923 unsigned long intparm)
883{ 924{
884 struct dasd_ccw_req *cqr; 925 struct dasd_ccw_req *cqr;
885 struct dasd_device *device; 926 struct dasd_device *device;
@@ -893,7 +934,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
893 return; 934 return;
894 } 935 }
895 936
896 device = (struct dasd_device *) cqr->device; 937 device = (struct dasd_device *) cqr->startdev;
897 if (device == NULL || 938 if (device == NULL ||
898 device != dasd_device_from_cdev_locked(cdev) || 939 device != dasd_device_from_cdev_locked(cdev) ||
899 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 940 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
@@ -905,46 +946,32 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
905 /* Schedule request to be retried. */ 946 /* Schedule request to be retried. */
906 cqr->status = DASD_CQR_QUEUED; 947 cqr->status = DASD_CQR_QUEUED;
907 948
908 dasd_clear_timer(device); 949 dasd_device_clear_timer(device);
909 dasd_schedule_bh(device); 950 dasd_schedule_device_bh(device);
910 dasd_put_device(device); 951 dasd_put_device(device);
911} 952}
912 953
913static void 954void dasd_generic_handle_state_change(struct dasd_device *device)
914dasd_handle_state_change_pending(struct dasd_device *device)
915{ 955{
916 struct dasd_ccw_req *cqr;
917 struct list_head *l, *n;
918
919 /* First of all start sense subsystem status request. */ 956 /* First of all start sense subsystem status request. */
920 dasd_eer_snss(device); 957 dasd_eer_snss(device);
921 958
922 device->stopped &= ~DASD_STOPPED_PENDING; 959 device->stopped &= ~DASD_STOPPED_PENDING;
923 960 dasd_schedule_device_bh(device);
924 /* restart all 'running' IO on queue */ 961 if (device->block)
925 list_for_each_safe(l, n, &device->ccw_queue) { 962 dasd_schedule_block_bh(device->block);
926 cqr = list_entry(l, struct dasd_ccw_req, list);
927 if (cqr->status == DASD_CQR_IN_IO) {
928 cqr->status = DASD_CQR_QUEUED;
929 }
930 }
931 dasd_clear_timer(device);
932 dasd_schedule_bh(device);
933} 963}
934 964
935/* 965/*
936 * Interrupt handler for "normal" ssch-io based dasd devices. 966 * Interrupt handler for "normal" ssch-io based dasd devices.
937 */ 967 */
938void 968void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
939dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 969 struct irb *irb)
940 struct irb *irb)
941{ 970{
942 struct dasd_ccw_req *cqr, *next; 971 struct dasd_ccw_req *cqr, *next;
943 struct dasd_device *device; 972 struct dasd_device *device;
944 unsigned long long now; 973 unsigned long long now;
945 int expires; 974 int expires;
946 dasd_era_t era;
947 char mask;
948 975
949 if (IS_ERR(irb)) { 976 if (IS_ERR(irb)) {
950 switch (PTR_ERR(irb)) { 977 switch (PTR_ERR(irb)) {
@@ -969,29 +996,25 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
969 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat), 996 cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
970 (unsigned int) intparm); 997 (unsigned int) intparm);
971 998
972 /* first of all check for state change pending interrupt */ 999 /* check for unsolicited interrupts */
973 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 1000 cqr = (struct dasd_ccw_req *) intparm;
974 if ((irb->scsw.dstat & mask) == mask) { 1001 if (!cqr || ((irb->scsw.cc == 1) &&
1002 (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
1003 (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) ) {
1004 if (cqr && cqr->status == DASD_CQR_IN_IO)
1005 cqr->status = DASD_CQR_QUEUED;
975 device = dasd_device_from_cdev_locked(cdev); 1006 device = dasd_device_from_cdev_locked(cdev);
976 if (!IS_ERR(device)) { 1007 if (!IS_ERR(device)) {
977 dasd_handle_state_change_pending(device); 1008 dasd_device_clear_timer(device);
1009 device->discipline->handle_unsolicited_interrupt(device,
1010 irb);
978 dasd_put_device(device); 1011 dasd_put_device(device);
979 } 1012 }
980 return; 1013 return;
981 } 1014 }
982 1015
983 cqr = (struct dasd_ccw_req *) intparm; 1016 device = (struct dasd_device *) cqr->startdev;
984 1017 if (!device ||
985 /* check for unsolicited interrupts */
986 if (cqr == NULL) {
987 MESSAGE(KERN_DEBUG,
988 "unsolicited interrupt received: bus_id %s",
989 cdev->dev.bus_id);
990 return;
991 }
992
993 device = (struct dasd_device *) cqr->device;
994 if (device == NULL ||
995 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1018 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
996 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", 1019 MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
997 cdev->dev.bus_id); 1020 cdev->dev.bus_id);
@@ -999,12 +1022,12 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
999 } 1022 }
1000 1023
1001 /* Check for clear pending */ 1024 /* Check for clear pending */
1002 if (cqr->status == DASD_CQR_CLEAR && 1025 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1003 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { 1026 irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
1004 cqr->status = DASD_CQR_QUEUED; 1027 cqr->status = DASD_CQR_CLEARED;
1005 dasd_clear_timer(device); 1028 dasd_device_clear_timer(device);
1006 wake_up(&dasd_flush_wq); 1029 wake_up(&dasd_flush_wq);
1007 dasd_schedule_bh(device); 1030 dasd_schedule_device_bh(device);
1008 return; 1031 return;
1009 } 1032 }
1010 1033
@@ -1017,272 +1040,164 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1017 } 1040 }
1018 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p", 1041 DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
1019 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr); 1042 ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
1020 1043 next = NULL;
1021 /* Find out the appropriate era_action. */
1022 if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
1023 era = dasd_era_fatal;
1024 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1025 irb->scsw.cstat == 0 &&
1026 !irb->esw.esw0.erw.cons)
1027 era = dasd_era_none;
1028 else if (irb->esw.esw0.erw.cons)
1029 era = device->discipline->examine_error(cqr, irb);
1030 else
1031 era = dasd_era_recover;
1032
1033 DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
1034 expires = 0; 1044 expires = 0;
1035 if (era == dasd_era_none) { 1045 if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1036 cqr->status = DASD_CQR_DONE; 1046 irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
1047 /* request was completed successfully */
1048 cqr->status = DASD_CQR_SUCCESS;
1037 cqr->stopclk = now; 1049 cqr->stopclk = now;
1038 /* Start first request on queue if possible -> fast_io. */ 1050 /* Start first request on queue if possible -> fast_io. */
1039 if (cqr->list.next != &device->ccw_queue) { 1051 if (cqr->devlist.next != &device->ccw_queue) {
1040 next = list_entry(cqr->list.next, 1052 next = list_entry(cqr->devlist.next,
1041 struct dasd_ccw_req, list); 1053 struct dasd_ccw_req, devlist);
1042 if ((next->status == DASD_CQR_QUEUED) &&
1043 (!device->stopped)) {
1044 if (device->discipline->start_IO(next) == 0)
1045 expires = next->expires;
1046 else
1047 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1048 "Interrupt fastpath "
1049 "failed!");
1050 }
1051 } 1054 }
1052 } else { /* error */ 1055 } else { /* error */
1053 memcpy(&cqr->irb, irb, sizeof (struct irb)); 1056 memcpy(&cqr->irb, irb, sizeof(struct irb));
1054 if (device->features & DASD_FEATURE_ERPLOG) { 1057 if (device->features & DASD_FEATURE_ERPLOG) {
1055 /* dump sense data */
1056 dasd_log_sense(cqr, irb); 1058 dasd_log_sense(cqr, irb);
1057 } 1059 }
1058 switch (era) { 1060 /* If we have no sense data, or we just don't want complex ERP
1059 case dasd_era_fatal: 1061 * for this request, but if we have retries left, then just
1060 cqr->status = DASD_CQR_FAILED; 1062 * reset this request and retry it in the fastpath
1061 cqr->stopclk = now; 1063 */
1062 break; 1064 if (!(cqr->irb.esw.esw0.erw.cons &&
1063 case dasd_era_recover: 1065 test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
1066 cqr->retries > 0) {
1067 DEV_MESSAGE(KERN_DEBUG, device,
1068 "default ERP in fastpath (%i retries left)",
1069 cqr->retries);
1070 cqr->lpm = LPM_ANYPATH;
1071 cqr->status = DASD_CQR_QUEUED;
1072 next = cqr;
1073 } else
1064 cqr->status = DASD_CQR_ERROR; 1074 cqr->status = DASD_CQR_ERROR;
1065 break; 1075 }
1066 default: 1076 if (next && (next->status == DASD_CQR_QUEUED) &&
1067 BUG(); 1077 (!device->stopped)) {
1068 } 1078 if (device->discipline->start_IO(next) == 0)
1079 expires = next->expires;
1080 else
1081 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1082 "Interrupt fastpath "
1083 "failed!");
1069 } 1084 }
1070 if (expires != 0) 1085 if (expires != 0)
1071 dasd_set_timer(device, expires); 1086 dasd_device_set_timer(device, expires);
1072 else 1087 else
1073 dasd_clear_timer(device); 1088 dasd_device_clear_timer(device);
1074 dasd_schedule_bh(device); 1089 dasd_schedule_device_bh(device);
1075} 1090}
1076 1091
1077/* 1092/*
1078 * posts the buffer_cache about a finalized request 1093 * If we have an error on a dasd_block layer request then we cancel
1094 * and return all further requests from the same dasd_block as well.
1079 */ 1095 */
1080static inline void 1096static void __dasd_device_recovery(struct dasd_device *device,
1081dasd_end_request(struct request *req, int uptodate) 1097 struct dasd_ccw_req *ref_cqr)
1082{ 1098{
1083 if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) 1099 struct list_head *l, *n;
1084 BUG(); 1100 struct dasd_ccw_req *cqr;
1085 add_disk_randomness(req->rq_disk);
1086 end_that_request_last(req, uptodate);
1087}
1088 1101
1089/* 1102 /*
1090 * Process finished error recovery ccw. 1103 * only requeue request that came from the dasd_block layer
1091 */ 1104 */
1092static inline void 1105 if (!ref_cqr->block)
1093__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) 1106 return;
1094{
1095 dasd_erp_fn_t erp_fn;
1096 1107
1097 if (cqr->status == DASD_CQR_DONE) 1108 list_for_each_safe(l, n, &device->ccw_queue) {
1098 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1109 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1099 else 1110 if (cqr->status == DASD_CQR_QUEUED &&
1100 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1111 ref_cqr->block == cqr->block) {
1101 erp_fn = device->discipline->erp_postaction(cqr); 1112 cqr->status = DASD_CQR_CLEARED;
1102 erp_fn(cqr); 1113 }
1103} 1114 }
1115};
1104 1116
1105/* 1117/*
1106 * Process ccw request queue. 1118 * Remove those ccw requests from the queue that need to be returned
1119 * to the upper layer.
1107 */ 1120 */
1108static void 1121static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1109__dasd_process_ccw_queue(struct dasd_device * device, 1122 struct list_head *final_queue)
1110 struct list_head *final_queue)
1111{ 1123{
1112 struct list_head *l, *n; 1124 struct list_head *l, *n;
1113 struct dasd_ccw_req *cqr; 1125 struct dasd_ccw_req *cqr;
1114 dasd_erp_fn_t erp_fn;
1115 1126
1116restart:
1117 /* Process request with final status. */ 1127 /* Process request with final status. */
1118 list_for_each_safe(l, n, &device->ccw_queue) { 1128 list_for_each_safe(l, n, &device->ccw_queue) {
1119 cqr = list_entry(l, struct dasd_ccw_req, list); 1129 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1130
1120 /* Stop list processing at the first non-final request. */ 1131 /* Stop list processing at the first non-final request. */
1121 if (cqr->status != DASD_CQR_DONE && 1132 if (cqr->status == DASD_CQR_QUEUED ||
1122 cqr->status != DASD_CQR_FAILED && 1133 cqr->status == DASD_CQR_IN_IO ||
1123 cqr->status != DASD_CQR_ERROR) 1134 cqr->status == DASD_CQR_CLEAR_PENDING)
1124 break; 1135 break;
1125 /* Process requests with DASD_CQR_ERROR */
1126 if (cqr->status == DASD_CQR_ERROR) { 1136 if (cqr->status == DASD_CQR_ERROR) {
1127 if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) { 1137 __dasd_device_recovery(device, cqr);
1128 cqr->status = DASD_CQR_FAILED;
1129 cqr->stopclk = get_clock();
1130 } else {
1131 if (cqr->irb.esw.esw0.erw.cons &&
1132 test_bit(DASD_CQR_FLAGS_USE_ERP,
1133 &cqr->flags)) {
1134 erp_fn = device->discipline->
1135 erp_action(cqr);
1136 erp_fn(cqr);
1137 } else
1138 dasd_default_erp_action(cqr);
1139 }
1140 goto restart;
1141 }
1142
1143 /* First of all call extended error reporting. */
1144 if (dasd_eer_enabled(device) &&
1145 cqr->status == DASD_CQR_FAILED) {
1146 dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
1147
1148 /* restart request */
1149 cqr->status = DASD_CQR_QUEUED;
1150 cqr->retries = 255;
1151 device->stopped |= DASD_STOPPED_QUIESCE;
1152 goto restart;
1153 } 1138 }
1154
1155 /* Process finished ERP request. */
1156 if (cqr->refers) {
1157 __dasd_process_erp(device, cqr);
1158 goto restart;
1159 }
1160
1161 /* Rechain finished requests to final queue */ 1139 /* Rechain finished requests to final queue */
1162 cqr->endclk = get_clock(); 1140 list_move_tail(&cqr->devlist, final_queue);
1163 list_move_tail(&cqr->list, final_queue);
1164 } 1141 }
1165} 1142}
1166 1143
1167static void
1168dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
1169{
1170 struct request *req;
1171 struct dasd_device *device;
1172 int status;
1173
1174 req = (struct request *) data;
1175 device = cqr->device;
1176 dasd_profile_end(device, cqr, req);
1177 status = cqr->device->discipline->free_cp(cqr,req);
1178 spin_lock_irq(&device->request_queue_lock);
1179 dasd_end_request(req, status);
1180 spin_unlock_irq(&device->request_queue_lock);
1181}
1182
1183
1184/* 1144/*
1185 * Fetch requests from the block device queue. 1145 * the cqrs from the final queue are returned to the upper layer
1146 * by setting a dasd_block state and calling the callback function
1186 */ 1147 */
1187static void 1148static void __dasd_device_process_final_queue(struct dasd_device *device,
1188__dasd_process_blk_queue(struct dasd_device * device) 1149 struct list_head *final_queue)
1189{ 1150{
1190 struct request_queue *queue; 1151 struct list_head *l, *n;
1191 struct request *req;
1192 struct dasd_ccw_req *cqr; 1152 struct dasd_ccw_req *cqr;
1193 int nr_queued;
1194
1195 queue = device->request_queue;
1196 /* No queue ? Then there is nothing to do. */
1197 if (queue == NULL)
1198 return;
1199
1200 /*
1201 * We requeue request from the block device queue to the ccw
1202 * queue only in two states. In state DASD_STATE_READY the
1203 * partition detection is done and we need to requeue requests
1204 * for that. State DASD_STATE_ONLINE is normal block device
1205 * operation.
1206 */
1207 if (device->state != DASD_STATE_READY &&
1208 device->state != DASD_STATE_ONLINE)
1209 return;
1210 nr_queued = 0;
1211 /* Now we try to fetch requests from the request queue */
1212 list_for_each_entry(cqr, &device->ccw_queue, list)
1213 if (cqr->status == DASD_CQR_QUEUED)
1214 nr_queued++;
1215 while (!blk_queue_plugged(queue) &&
1216 elv_next_request(queue) &&
1217 nr_queued < DASD_CHANQ_MAX_SIZE) {
1218 req = elv_next_request(queue);
1219 1153
1220 if (device->features & DASD_FEATURE_READONLY && 1154 list_for_each_safe(l, n, final_queue) {
1221 rq_data_dir(req) == WRITE) { 1155 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1222 DBF_DEV_EVENT(DBF_ERR, device, 1156 list_del_init(&cqr->devlist);
1223 "Rejecting write request %p", 1157 if (cqr->block)
1224 req); 1158 spin_lock_bh(&cqr->block->queue_lock);
1225 blkdev_dequeue_request(req); 1159 switch (cqr->status) {
1226 dasd_end_request(req, 0); 1160 case DASD_CQR_SUCCESS:
1227 continue; 1161 cqr->status = DASD_CQR_DONE;
1228 } 1162 break;
1229 if (device->stopped & DASD_STOPPED_DC_EIO) { 1163 case DASD_CQR_ERROR:
1230 blkdev_dequeue_request(req); 1164 cqr->status = DASD_CQR_NEED_ERP;
1231 dasd_end_request(req, 0); 1165 break;
1232 continue; 1166 case DASD_CQR_CLEARED:
1233 } 1167 cqr->status = DASD_CQR_TERMINATED;
1234 cqr = device->discipline->build_cp(device, req); 1168 break;
1235 if (IS_ERR(cqr)) { 1169 default:
1236 if (PTR_ERR(cqr) == -ENOMEM) 1170 DEV_MESSAGE(KERN_ERR, device,
1237 break; /* terminate request queue loop */ 1171 "wrong cqr status in __dasd_process_final_queue "
1238 if (PTR_ERR(cqr) == -EAGAIN) { 1172 "for cqr %p, status %x",
1239 /* 1173 cqr, cqr->status);
1240 * The current request cannot be build right 1174 BUG();
1241 * now, we have to try later. If this request
1242 * is the head-of-queue we stop the device
1243 * for 1/2 second.
1244 */
1245 if (!list_empty(&device->ccw_queue))
1246 break;
1247 device->stopped |= DASD_STOPPED_PENDING;
1248 dasd_set_timer(device, HZ/2);
1249 break;
1250 }
1251 DBF_DEV_EVENT(DBF_ERR, device,
1252 "CCW creation failed (rc=%ld) "
1253 "on request %p",
1254 PTR_ERR(cqr), req);
1255 blkdev_dequeue_request(req);
1256 dasd_end_request(req, 0);
1257 continue;
1258 } 1175 }
1259 cqr->callback = dasd_end_request_cb; 1176 if (cqr->block)
1260 cqr->callback_data = (void *) req; 1177 spin_unlock_bh(&cqr->block->queue_lock);
1261 cqr->status = DASD_CQR_QUEUED; 1178 if (cqr->callback != NULL)
1262 blkdev_dequeue_request(req); 1179 (cqr->callback)(cqr, cqr->callback_data);
1263 list_add_tail(&cqr->list, &device->ccw_queue);
1264 dasd_profile_start(device, cqr, req);
1265 nr_queued++;
1266 } 1180 }
1267} 1181}
1268 1182
1183
1184
1269/* 1185/*
1270 * Take a look at the first request on the ccw queue and check 1186 * Take a look at the first request on the ccw queue and check
1271 * if it reached its expire time. If so, terminate the IO. 1187 * if it reached its expire time. If so, terminate the IO.
1272 */ 1188 */
1273static void 1189static void __dasd_device_check_expire(struct dasd_device *device)
1274__dasd_check_expire(struct dasd_device * device)
1275{ 1190{
1276 struct dasd_ccw_req *cqr; 1191 struct dasd_ccw_req *cqr;
1277 1192
1278 if (list_empty(&device->ccw_queue)) 1193 if (list_empty(&device->ccw_queue))
1279 return; 1194 return;
1280 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1195 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1281 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1196 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1282 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1197 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1283 if (device->discipline->term_IO(cqr) != 0) { 1198 if (device->discipline->term_IO(cqr) != 0) {
1284 /* Hmpf, try again in 5 sec */ 1199 /* Hmpf, try again in 5 sec */
1285 dasd_set_timer(device, 5*HZ); 1200 dasd_device_set_timer(device, 5*HZ);
1286 DEV_MESSAGE(KERN_ERR, device, 1201 DEV_MESSAGE(KERN_ERR, device,
1287 "internal error - timeout (%is) expired " 1202 "internal error - timeout (%is) expired "
1288 "for cqr %p, termination failed, " 1203 "for cqr %p, termination failed, "
@@ -1301,77 +1216,53 @@ __dasd_check_expire(struct dasd_device * device)
1301 * Take a look at the first request on the ccw queue and check 1216 * Take a look at the first request on the ccw queue and check
1302 * if it needs to be started. 1217 * if it needs to be started.
1303 */ 1218 */
1304static void 1219static void __dasd_device_start_head(struct dasd_device *device)
1305__dasd_start_head(struct dasd_device * device)
1306{ 1220{
1307 struct dasd_ccw_req *cqr; 1221 struct dasd_ccw_req *cqr;
1308 int rc; 1222 int rc;
1309 1223
1310 if (list_empty(&device->ccw_queue)) 1224 if (list_empty(&device->ccw_queue))
1311 return; 1225 return;
1312 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1226 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1313 if (cqr->status != DASD_CQR_QUEUED) 1227 if (cqr->status != DASD_CQR_QUEUED)
1314 return; 1228 return;
1315 /* Non-temporary stop condition will trigger fail fast */ 1229 /* when device is stopped, return request to previous layer */
1316 if (device->stopped & ~DASD_STOPPED_PENDING && 1230 if (device->stopped) {
1317 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1231 cqr->status = DASD_CQR_CLEARED;
1318 (!dasd_eer_enabled(device))) { 1232 dasd_schedule_device_bh(device);
1319 cqr->status = DASD_CQR_FAILED;
1320 dasd_schedule_bh(device);
1321 return; 1233 return;
1322 } 1234 }
1323 /* Don't try to start requests if device is stopped */
1324 if (device->stopped)
1325 return;
1326 1235
1327 rc = device->discipline->start_IO(cqr); 1236 rc = device->discipline->start_IO(cqr);
1328 if (rc == 0) 1237 if (rc == 0)
1329 dasd_set_timer(device, cqr->expires); 1238 dasd_device_set_timer(device, cqr->expires);
1330 else if (rc == -EACCES) { 1239 else if (rc == -EACCES) {
1331 dasd_schedule_bh(device); 1240 dasd_schedule_device_bh(device);
1332 } else 1241 } else
1333 /* Hmpf, try again in 1/2 sec */ 1242 /* Hmpf, try again in 1/2 sec */
1334 dasd_set_timer(device, 50); 1243 dasd_device_set_timer(device, 50);
1335}
1336
1337static inline int
1338_wait_for_clear(struct dasd_ccw_req *cqr)
1339{
1340 return (cqr->status == DASD_CQR_QUEUED);
1341} 1244}
1342 1245
1343/* 1246/*
1344 * Remove all requests from the ccw queue (all = '1') or only block device 1247 * Go through all request on the dasd_device request queue,
1345 * requests in case all = '0'. 1248 * terminate them on the cdev if necessary, and return them to the
1346 * Take care of the erp-chain (chained via cqr->refers) and remove either 1249 * submitting layer via callback.
1347 * the whole erp-chain or none of the erp-requests. 1250 * Note:
1348 * If a request is currently running, term_IO is called and the request 1251 * Make sure that all 'submitting layers' still exist when
1349 * is re-queued. Prior to removing the terminated request we need to wait 1252 * this function is called!. In other words, when 'device' is a base
1350 * for the clear-interrupt. 1253 * device then all block layer requests must have been removed before
1351 * In case termination is not possible we stop processing and just finishing 1254 * via dasd_flush_block_queue.
1352 * the already moved requests.
1353 */ 1255 */
1354static int 1256int dasd_flush_device_queue(struct dasd_device *device)
1355dasd_flush_ccw_queue(struct dasd_device * device, int all)
1356{ 1257{
1357 struct dasd_ccw_req *cqr, *orig, *n; 1258 struct dasd_ccw_req *cqr, *n;
1358 int rc, i; 1259 int rc;
1359
1360 struct list_head flush_queue; 1260 struct list_head flush_queue;
1361 1261
1362 INIT_LIST_HEAD(&flush_queue); 1262 INIT_LIST_HEAD(&flush_queue);
1363 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1263 spin_lock_irq(get_ccwdev_lock(device->cdev));
1364 rc = 0; 1264 rc = 0;
1365restart: 1265 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1366 list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
1367 /* get original request of erp request-chain */
1368 for (orig = cqr; orig->refers != NULL; orig = orig->refers);
1369
1370 /* Flush all request or only block device requests? */
1371 if (all == 0 && cqr->callback != dasd_end_request_cb &&
1372 orig->callback != dasd_end_request_cb) {
1373 continue;
1374 }
1375 /* Check status and move request to flush_queue */ 1266 /* Check status and move request to flush_queue */
1376 switch (cqr->status) { 1267 switch (cqr->status) {
1377 case DASD_CQR_IN_IO: 1268 case DASD_CQR_IN_IO:
@@ -1387,90 +1278,60 @@ restart:
1387 } 1278 }
1388 break; 1279 break;
1389 case DASD_CQR_QUEUED: 1280 case DASD_CQR_QUEUED:
1390 case DASD_CQR_ERROR:
1391 /* set request to FAILED */
1392 cqr->stopclk = get_clock(); 1281 cqr->stopclk = get_clock();
1393 cqr->status = DASD_CQR_FAILED; 1282 cqr->status = DASD_CQR_CLEARED;
1394 break; 1283 break;
1395 default: /* do not touch the others */ 1284 default: /* no need to modify the others */
1396 break; 1285 break;
1397 } 1286 }
1398 /* Rechain request (including erp chain) */ 1287 list_move_tail(&cqr->devlist, &flush_queue);
1399 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
1400 cqr->endclk = get_clock();
1401 list_move_tail(&cqr->list, &flush_queue);
1402 }
1403 if (i > 1)
1404 /* moved more than one request - need to restart */
1405 goto restart;
1406 } 1288 }
1407
1408finished: 1289finished:
1409 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1290 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1410 /* Now call the callback function of flushed requests */ 1291 /*
1411restart_cb: 1292 * After this point all requests must be in state CLEAR_PENDING,
1412 list_for_each_entry_safe(cqr, n, &flush_queue, list) { 1293 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1413 if (cqr->status == DASD_CQR_CLEAR) { 1294 * one of the others.
1414 /* wait for clear interrupt! */ 1295 */
1415 wait_event(dasd_flush_wq, _wait_for_clear(cqr)); 1296 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1416 cqr->status = DASD_CQR_FAILED; 1297 wait_event(dasd_flush_wq,
1417 } 1298 (cqr->status != DASD_CQR_CLEAR_PENDING));
1418 /* Process finished ERP request. */ 1299 /*
1419 if (cqr->refers) { 1300 * Now set each request back to TERMINATED, DONE or NEED_ERP
1420 __dasd_process_erp(device, cqr); 1301 * and call the callback function of flushed requests
1421 /* restart list_for_xx loop since dasd_process_erp 1302 */
1422 * might remove multiple elements */ 1303 __dasd_device_process_final_queue(device, &flush_queue);
1423 goto restart_cb;
1424 }
1425 /* call the callback function */
1426 cqr->endclk = get_clock();
1427 if (cqr->callback != NULL)
1428 (cqr->callback)(cqr, cqr->callback_data);
1429 }
1430 return rc; 1304 return rc;
1431} 1305}
1432 1306
1433/* 1307/*
1434 * Acquire the device lock and process queues for the device. 1308 * Acquire the device lock and process queues for the device.
1435 */ 1309 */
1436static void 1310static void dasd_device_tasklet(struct dasd_device *device)
1437dasd_tasklet(struct dasd_device * device)
1438{ 1311{
1439 struct list_head final_queue; 1312 struct list_head final_queue;
1440 struct list_head *l, *n;
1441 struct dasd_ccw_req *cqr;
1442 1313
1443 atomic_set (&device->tasklet_scheduled, 0); 1314 atomic_set (&device->tasklet_scheduled, 0);
1444 INIT_LIST_HEAD(&final_queue); 1315 INIT_LIST_HEAD(&final_queue);
1445 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1316 spin_lock_irq(get_ccwdev_lock(device->cdev));
1446 /* Check expire time of first request on the ccw queue. */ 1317 /* Check expire time of first request on the ccw queue. */
1447 __dasd_check_expire(device); 1318 __dasd_device_check_expire(device);
1448 /* Finish off requests on ccw queue */ 1319 /* find final requests on ccw queue */
1449 __dasd_process_ccw_queue(device, &final_queue); 1320 __dasd_device_process_ccw_queue(device, &final_queue);
1450 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1321 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1451 /* Now call the callback function of requests with final status */ 1322 /* Now call the callback function of requests with final status */
1452 list_for_each_safe(l, n, &final_queue) { 1323 __dasd_device_process_final_queue(device, &final_queue);
1453 cqr = list_entry(l, struct dasd_ccw_req, list); 1324 spin_lock_irq(get_ccwdev_lock(device->cdev));
1454 list_del_init(&cqr->list);
1455 if (cqr->callback != NULL)
1456 (cqr->callback)(cqr, cqr->callback_data);
1457 }
1458 spin_lock_irq(&device->request_queue_lock);
1459 spin_lock(get_ccwdev_lock(device->cdev));
1460 /* Get new request from the block device request queue */
1461 __dasd_process_blk_queue(device);
1462 /* Now check if the head of the ccw queue needs to be started. */ 1325 /* Now check if the head of the ccw queue needs to be started. */
1463 __dasd_start_head(device); 1326 __dasd_device_start_head(device);
1464 spin_unlock(get_ccwdev_lock(device->cdev)); 1327 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1465 spin_unlock_irq(&device->request_queue_lock);
1466 dasd_put_device(device); 1328 dasd_put_device(device);
1467} 1329}
1468 1330
1469/* 1331/*
1470 * Schedules a call to dasd_tasklet over the device tasklet. 1332 * Schedules a call to dasd_tasklet over the device tasklet.
1471 */ 1333 */
1472void 1334void dasd_schedule_device_bh(struct dasd_device *device)
1473dasd_schedule_bh(struct dasd_device * device)
1474{ 1335{
1475 /* Protect against rescheduling. */ 1336 /* Protect against rescheduling. */
1476 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1337 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
@@ -1480,160 +1341,109 @@ dasd_schedule_bh(struct dasd_device * device)
1480} 1341}
1481 1342
1482/* 1343/*
1483 * Queue a request to the head of the ccw_queue. Start the I/O if 1344 * Queue a request to the head of the device ccw_queue.
1484 * possible. 1345 * Start the I/O if possible.
1485 */ 1346 */
1486void 1347void dasd_add_request_head(struct dasd_ccw_req *cqr)
1487dasd_add_request_head(struct dasd_ccw_req *req)
1488{ 1348{
1489 struct dasd_device *device; 1349 struct dasd_device *device;
1490 unsigned long flags; 1350 unsigned long flags;
1491 1351
1492 device = req->device; 1352 device = cqr->startdev;
1493 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1353 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1494 req->status = DASD_CQR_QUEUED; 1354 cqr->status = DASD_CQR_QUEUED;
1495 req->device = device; 1355 list_add(&cqr->devlist, &device->ccw_queue);
1496 list_add(&req->list, &device->ccw_queue);
1497 /* let the bh start the request to keep them in order */ 1356 /* let the bh start the request to keep them in order */
1498 dasd_schedule_bh(device); 1357 dasd_schedule_device_bh(device);
1499 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1358 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1500} 1359}
1501 1360
1502/* 1361/*
1503 * Queue a request to the tail of the ccw_queue. Start the I/O if 1362 * Queue a request to the tail of the device ccw_queue.
1504 * possible. 1363 * Start the I/O if possible.
1505 */ 1364 */
1506void 1365void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1507dasd_add_request_tail(struct dasd_ccw_req *req)
1508{ 1366{
1509 struct dasd_device *device; 1367 struct dasd_device *device;
1510 unsigned long flags; 1368 unsigned long flags;
1511 1369
1512 device = req->device; 1370 device = cqr->startdev;
1513 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1371 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1514 req->status = DASD_CQR_QUEUED; 1372 cqr->status = DASD_CQR_QUEUED;
1515 req->device = device; 1373 list_add_tail(&cqr->devlist, &device->ccw_queue);
1516 list_add_tail(&req->list, &device->ccw_queue);
1517 /* let the bh start the request to keep them in order */ 1374 /* let the bh start the request to keep them in order */
1518 dasd_schedule_bh(device); 1375 dasd_schedule_device_bh(device);
1519 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1376 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1520} 1377}
1521 1378
1522/* 1379/*
1523 * Wakeup callback. 1380 * Wakeup helper for the 'sleep_on' functions.
1524 */ 1381 */
1525static void 1382static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1526dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1527{ 1383{
1528 wake_up((wait_queue_head_t *) data); 1384 wake_up((wait_queue_head_t *) data);
1529} 1385}
1530 1386
1531static inline int 1387static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1532_wait_for_wakeup(struct dasd_ccw_req *cqr)
1533{ 1388{
1534 struct dasd_device *device; 1389 struct dasd_device *device;
1535 int rc; 1390 int rc;
1536 1391
1537 device = cqr->device; 1392 device = cqr->startdev;
1538 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1393 spin_lock_irq(get_ccwdev_lock(device->cdev));
1539 rc = ((cqr->status == DASD_CQR_DONE || 1394 rc = ((cqr->status == DASD_CQR_DONE ||
1540 cqr->status == DASD_CQR_FAILED) && 1395 cqr->status == DASD_CQR_NEED_ERP ||
1541 list_empty(&cqr->list)); 1396 cqr->status == DASD_CQR_TERMINATED) &&
1397 list_empty(&cqr->devlist));
1542 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1398 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1543 return rc; 1399 return rc;
1544} 1400}
1545 1401
1546/* 1402/*
1547 * Attempts to start a special ccw queue and waits for its completion. 1403 * Queue a request to the tail of the device ccw_queue and wait for
1404 * it's completion.
1548 */ 1405 */
1549int 1406int dasd_sleep_on(struct dasd_ccw_req *cqr)
1550dasd_sleep_on(struct dasd_ccw_req * cqr)
1551{ 1407{
1552 wait_queue_head_t wait_q; 1408 wait_queue_head_t wait_q;
1553 struct dasd_device *device; 1409 struct dasd_device *device;
1554 int rc; 1410 int rc;
1555 1411
1556 device = cqr->device; 1412 device = cqr->startdev;
1557 spin_lock_irq(get_ccwdev_lock(device->cdev));
1558 1413
1559 init_waitqueue_head (&wait_q); 1414 init_waitqueue_head (&wait_q);
1560 cqr->callback = dasd_wakeup_cb; 1415 cqr->callback = dasd_wakeup_cb;
1561 cqr->callback_data = (void *) &wait_q; 1416 cqr->callback_data = (void *) &wait_q;
1562 cqr->status = DASD_CQR_QUEUED; 1417 dasd_add_request_tail(cqr);
1563 list_add_tail(&cqr->list, &device->ccw_queue);
1564
1565 /* let the bh start the request to keep them in order */
1566 dasd_schedule_bh(device);
1567
1568 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1569
1570 wait_event(wait_q, _wait_for_wakeup(cqr)); 1418 wait_event(wait_q, _wait_for_wakeup(cqr));
1571 1419
1572 /* Request status is either done or failed. */ 1420 /* Request status is either done or failed. */
1573 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1421 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1574 return rc; 1422 return rc;
1575} 1423}
1576 1424
1577/* 1425/*
1578 * Attempts to start a special ccw queue and wait interruptible 1426 * Queue a request to the tail of the device ccw_queue and wait
1579 * for its completion. 1427 * interruptible for it's completion.
1580 */ 1428 */
1581int 1429int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1582dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1583{ 1430{
1584 wait_queue_head_t wait_q; 1431 wait_queue_head_t wait_q;
1585 struct dasd_device *device; 1432 struct dasd_device *device;
1586 int rc, finished; 1433 int rc;
1587
1588 device = cqr->device;
1589 spin_lock_irq(get_ccwdev_lock(device->cdev));
1590 1434
1435 device = cqr->startdev;
1591 init_waitqueue_head (&wait_q); 1436 init_waitqueue_head (&wait_q);
1592 cqr->callback = dasd_wakeup_cb; 1437 cqr->callback = dasd_wakeup_cb;
1593 cqr->callback_data = (void *) &wait_q; 1438 cqr->callback_data = (void *) &wait_q;
1594 cqr->status = DASD_CQR_QUEUED; 1439 dasd_add_request_tail(cqr);
1595 list_add_tail(&cqr->list, &device->ccw_queue); 1440 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1596 1441 if (rc == -ERESTARTSYS) {
1597 /* let the bh start the request to keep them in order */ 1442 dasd_cancel_req(cqr);
1598 dasd_schedule_bh(device); 1443 /* wait (non-interruptible) for final status */
1599 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1444 wait_event(wait_q, _wait_for_wakeup(cqr));
1600
1601 finished = 0;
1602 while (!finished) {
1603 rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
1604 if (rc != -ERESTARTSYS) {
1605 /* Request is final (done or failed) */
1606 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1607 break;
1608 }
1609 spin_lock_irq(get_ccwdev_lock(device->cdev));
1610 switch (cqr->status) {
1611 case DASD_CQR_IN_IO:
1612 /* terminate runnig cqr */
1613 if (device->discipline->term_IO) {
1614 cqr->retries = -1;
1615 device->discipline->term_IO(cqr);
1616 /* wait (non-interruptible) for final status
1617 * because signal ist still pending */
1618 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1619 wait_event(wait_q, _wait_for_wakeup(cqr));
1620 spin_lock_irq(get_ccwdev_lock(device->cdev));
1621 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1622 finished = 1;
1623 }
1624 break;
1625 case DASD_CQR_QUEUED:
1626 /* request */
1627 list_del_init(&cqr->list);
1628 rc = -EIO;
1629 finished = 1;
1630 break;
1631 default:
1632 /* cqr with 'non-interruptable' status - just wait */
1633 break;
1634 }
1635 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1636 } 1445 }
1446 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1637 return rc; 1447 return rc;
1638} 1448}
1639 1449
@@ -1643,25 +1453,23 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
1643 * and be put back to status queued, before the special request is added 1453 * and be put back to status queued, before the special request is added
1644 * to the head of the queue. Then the special request is waited on normally. 1454 * to the head of the queue. Then the special request is waited on normally.
1645 */ 1455 */
1646static inline int 1456static inline int _dasd_term_running_cqr(struct dasd_device *device)
1647_dasd_term_running_cqr(struct dasd_device *device)
1648{ 1457{
1649 struct dasd_ccw_req *cqr; 1458 struct dasd_ccw_req *cqr;
1650 1459
1651 if (list_empty(&device->ccw_queue)) 1460 if (list_empty(&device->ccw_queue))
1652 return 0; 1461 return 0;
1653 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); 1462 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1654 return device->discipline->term_IO(cqr); 1463 return device->discipline->term_IO(cqr);
1655} 1464}
1656 1465
1657int 1466int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1658dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1659{ 1467{
1660 wait_queue_head_t wait_q; 1468 wait_queue_head_t wait_q;
1661 struct dasd_device *device; 1469 struct dasd_device *device;
1662 int rc; 1470 int rc;
1663 1471
1664 device = cqr->device; 1472 device = cqr->startdev;
1665 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1473 spin_lock_irq(get_ccwdev_lock(device->cdev));
1666 rc = _dasd_term_running_cqr(device); 1474 rc = _dasd_term_running_cqr(device);
1667 if (rc) { 1475 if (rc) {
@@ -1673,17 +1481,17 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1673 cqr->callback = dasd_wakeup_cb; 1481 cqr->callback = dasd_wakeup_cb;
1674 cqr->callback_data = (void *) &wait_q; 1482 cqr->callback_data = (void *) &wait_q;
1675 cqr->status = DASD_CQR_QUEUED; 1483 cqr->status = DASD_CQR_QUEUED;
1676 list_add(&cqr->list, &device->ccw_queue); 1484 list_add(&cqr->devlist, &device->ccw_queue);
1677 1485
1678 /* let the bh start the request to keep them in order */ 1486 /* let the bh start the request to keep them in order */
1679 dasd_schedule_bh(device); 1487 dasd_schedule_device_bh(device);
1680 1488
1681 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1489 spin_unlock_irq(get_ccwdev_lock(device->cdev));
1682 1490
1683 wait_event(wait_q, _wait_for_wakeup(cqr)); 1491 wait_event(wait_q, _wait_for_wakeup(cqr));
1684 1492
1685 /* Request status is either done or failed. */ 1493 /* Request status is either done or failed. */
1686 rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 1494 rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
1687 return rc; 1495 return rc;
1688} 1496}
1689 1497
@@ -1692,11 +1500,14 @@ dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
1692 * This is useful to timeout requests. The request will be 1500 * This is useful to timeout requests. The request will be
1693 * terminated if it is currently in i/o. 1501 * terminated if it is currently in i/o.
1694 * Returns 1 if the request has been terminated. 1502 * Returns 1 if the request has been terminated.
1503 * 0 if there was no need to terminate the request (not started yet)
1504 * negative error code if termination failed
1505 * Cancellation of a request is an asynchronous operation! The calling
1506 * function has to wait until the request is properly returned via callback.
1695 */ 1507 */
1696int 1508int dasd_cancel_req(struct dasd_ccw_req *cqr)
1697dasd_cancel_req(struct dasd_ccw_req *cqr)
1698{ 1509{
1699 struct dasd_device *device = cqr->device; 1510 struct dasd_device *device = cqr->startdev;
1700 unsigned long flags; 1511 unsigned long flags;
1701 int rc; 1512 int rc;
1702 1513
@@ -1704,74 +1515,453 @@ dasd_cancel_req(struct dasd_ccw_req *cqr)
1704 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1515 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1705 switch (cqr->status) { 1516 switch (cqr->status) {
1706 case DASD_CQR_QUEUED: 1517 case DASD_CQR_QUEUED:
1707 /* request was not started - just set to failed */ 1518 /* request was not started - just set to cleared */
1708 cqr->status = DASD_CQR_FAILED; 1519 cqr->status = DASD_CQR_CLEARED;
1709 break; 1520 break;
1710 case DASD_CQR_IN_IO: 1521 case DASD_CQR_IN_IO:
1711 /* request in IO - terminate IO and release again */ 1522 /* request in IO - terminate IO and release again */
1712 if (device->discipline->term_IO(cqr) != 0) 1523 rc = device->discipline->term_IO(cqr);
1713 /* what to do if unable to terminate ?????? 1524 if (rc) {
1714 e.g. not _IN_IO */ 1525 DEV_MESSAGE(KERN_ERR, device,
1715 cqr->status = DASD_CQR_FAILED; 1526 "dasd_cancel_req is unable "
1716 cqr->stopclk = get_clock(); 1527 " to terminate request %p, rc = %d",
1717 rc = 1; 1528 cqr, rc);
1529 } else {
1530 cqr->stopclk = get_clock();
1531 rc = 1;
1532 }
1718 break; 1533 break;
1719 case DASD_CQR_DONE: 1534 default: /* already finished or clear pending - do nothing */
1720 case DASD_CQR_FAILED:
1721 /* already finished - do nothing */
1722 break; 1535 break;
1723 default: 1536 }
1724 DEV_MESSAGE(KERN_ALERT, device, 1537 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1725 "invalid status %02x in request", 1538 dasd_schedule_device_bh(device);
1726 cqr->status); 1539 return rc;
1540}
1541
1542
1543/*
1544 * SECTION: Operations of the dasd_block layer.
1545 */
1546
1547/*
1548 * Timeout function for dasd_block. This is used when the block layer
1549 * is waiting for something that may not come reliably, (e.g. a state
1550 * change interrupt)
1551 */
1552static void dasd_block_timeout(unsigned long ptr)
1553{
1554 unsigned long flags;
1555 struct dasd_block *block;
1556
1557 block = (struct dasd_block *) ptr;
1558 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1559 /* re-activate request queue */
1560 block->base->stopped &= ~DASD_STOPPED_PENDING;
1561 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1562 dasd_schedule_block_bh(block);
1563}
1564
1565/*
1566 * Setup timeout for a dasd_block in jiffies.
1567 */
1568void dasd_block_set_timer(struct dasd_block *block, int expires)
1569{
1570 if (expires == 0) {
1571 if (timer_pending(&block->timer))
1572 del_timer(&block->timer);
1573 return;
1574 }
1575 if (timer_pending(&block->timer)) {
1576 if (mod_timer(&block->timer, jiffies + expires))
1577 return;
1578 }
1579 block->timer.function = dasd_block_timeout;
1580 block->timer.data = (unsigned long) block;
1581 block->timer.expires = jiffies + expires;
1582 add_timer(&block->timer);
1583}
1584
1585/*
1586 * Clear timeout for a dasd_block.
1587 */
1588void dasd_block_clear_timer(struct dasd_block *block)
1589{
1590 if (timer_pending(&block->timer))
1591 del_timer(&block->timer);
1592}
1593
1594/*
1595 * posts the buffer_cache about a finalized request
1596 */
1597static inline void dasd_end_request(struct request *req, int uptodate)
1598{
1599 if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
1727 BUG(); 1600 BUG();
1601 add_disk_randomness(req->rq_disk);
1602 end_that_request_last(req, uptodate);
1603}
1728 1604
1605/*
1606 * Process finished error recovery ccw.
1607 */
1608static inline void __dasd_block_process_erp(struct dasd_block *block,
1609 struct dasd_ccw_req *cqr)
1610{
1611 dasd_erp_fn_t erp_fn;
1612 struct dasd_device *device = block->base;
1613
1614 if (cqr->status == DASD_CQR_DONE)
1615 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1616 else
1617 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
1618 erp_fn = device->discipline->erp_postaction(cqr);
1619 erp_fn(cqr);
1620}
1621
1622/*
1623 * Fetch requests from the block device queue.
1624 */
1625static void __dasd_process_request_queue(struct dasd_block *block)
1626{
1627 struct request_queue *queue;
1628 struct request *req;
1629 struct dasd_ccw_req *cqr;
1630 struct dasd_device *basedev;
1631 unsigned long flags;
1632 queue = block->request_queue;
1633 basedev = block->base;
1634 /* No queue ? Then there is nothing to do. */
1635 if (queue == NULL)
1636 return;
1637
1638 /*
1639 * We requeue request from the block device queue to the ccw
1640 * queue only in two states. In state DASD_STATE_READY the
1641 * partition detection is done and we need to requeue requests
1642 * for that. State DASD_STATE_ONLINE is normal block device
1643 * operation.
1644 */
1645 if (basedev->state < DASD_STATE_READY)
1646 return;
1647 /* Now we try to fetch requests from the request queue */
1648 while (!blk_queue_plugged(queue) &&
1649 elv_next_request(queue)) {
1650
1651 req = elv_next_request(queue);
1652
1653 if (basedev->features & DASD_FEATURE_READONLY &&
1654 rq_data_dir(req) == WRITE) {
1655 DBF_DEV_EVENT(DBF_ERR, basedev,
1656 "Rejecting write request %p",
1657 req);
1658 blkdev_dequeue_request(req);
1659 dasd_end_request(req, 0);
1660 continue;
1661 }
1662 cqr = basedev->discipline->build_cp(basedev, block, req);
1663 if (IS_ERR(cqr)) {
1664 if (PTR_ERR(cqr) == -EBUSY)
1665 break; /* normal end condition */
1666 if (PTR_ERR(cqr) == -ENOMEM)
1667 break; /* terminate request queue loop */
1668 if (PTR_ERR(cqr) == -EAGAIN) {
1669 /*
1670 * The current request cannot be build right
1671 * now, we have to try later. If this request
1672 * is the head-of-queue we stop the device
1673 * for 1/2 second.
1674 */
1675 if (!list_empty(&block->ccw_queue))
1676 break;
1677 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
1678 basedev->stopped |= DASD_STOPPED_PENDING;
1679 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
1680 dasd_block_set_timer(block, HZ/2);
1681 break;
1682 }
1683 DBF_DEV_EVENT(DBF_ERR, basedev,
1684 "CCW creation failed (rc=%ld) "
1685 "on request %p",
1686 PTR_ERR(cqr), req);
1687 blkdev_dequeue_request(req);
1688 dasd_end_request(req, 0);
1689 continue;
1690 }
1691 /*
1692 * Note: callback is set to dasd_return_cqr_cb in
1693 * __dasd_block_start_head to cover erp requests as well
1694 */
1695 cqr->callback_data = (void *) req;
1696 cqr->status = DASD_CQR_FILLED;
1697 blkdev_dequeue_request(req);
1698 list_add_tail(&cqr->blocklist, &block->ccw_queue);
1699 dasd_profile_start(block, cqr, req);
1700 }
1701}
1702
1703static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1704{
1705 struct request *req;
1706 int status;
1707
1708 req = (struct request *) cqr->callback_data;
1709 dasd_profile_end(cqr->block, cqr, req);
1710 status = cqr->memdev->discipline->free_cp(cqr, req);
1711 dasd_end_request(req, status);
1712}
1713
1714/*
1715 * Process ccw request queue.
1716 */
1717static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1718 struct list_head *final_queue)
1719{
1720 struct list_head *l, *n;
1721 struct dasd_ccw_req *cqr;
1722 dasd_erp_fn_t erp_fn;
1723 unsigned long flags;
1724 struct dasd_device *base = block->base;
1725
1726restart:
1727 /* Process request with final status. */
1728 list_for_each_safe(l, n, &block->ccw_queue) {
1729 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1730 if (cqr->status != DASD_CQR_DONE &&
1731 cqr->status != DASD_CQR_FAILED &&
1732 cqr->status != DASD_CQR_NEED_ERP &&
1733 cqr->status != DASD_CQR_TERMINATED)
1734 continue;
1735
1736 if (cqr->status == DASD_CQR_TERMINATED) {
1737 base->discipline->handle_terminated_request(cqr);
1738 goto restart;
1739 }
1740
1741 /* Process requests that may be recovered */
1742 if (cqr->status == DASD_CQR_NEED_ERP) {
1743 if (cqr->irb.esw.esw0.erw.cons &&
1744 test_bit(DASD_CQR_FLAGS_USE_ERP,
1745 &cqr->flags)) {
1746 erp_fn = base->discipline->erp_action(cqr);
1747 erp_fn(cqr);
1748 }
1749 goto restart;
1750 }
1751
1752 /* First of all call extended error reporting. */
1753 if (dasd_eer_enabled(base) &&
1754 cqr->status == DASD_CQR_FAILED) {
1755 dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1756
1757 /* restart request */
1758 cqr->status = DASD_CQR_FILLED;
1759 cqr->retries = 255;
1760 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1761 base->stopped |= DASD_STOPPED_QUIESCE;
1762 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1763 flags);
1764 goto restart;
1765 }
1766
1767 /* Process finished ERP request. */
1768 if (cqr->refers) {
1769 __dasd_block_process_erp(block, cqr);
1770 goto restart;
1771 }
1772
1773 /* Rechain finished requests to final queue */
1774 cqr->endclk = get_clock();
1775 list_move_tail(&cqr->blocklist, final_queue);
1776 }
1777}
1778
1779static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1780{
1781 dasd_schedule_block_bh(cqr->block);
1782}
1783
1784static void __dasd_block_start_head(struct dasd_block *block)
1785{
1786 struct dasd_ccw_req *cqr;
1787
1788 if (list_empty(&block->ccw_queue))
1789 return;
1790 /* We allways begin with the first requests on the queue, as some
1791 * of previously started requests have to be enqueued on a
1792 * dasd_device again for error recovery.
1793 */
1794 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
1795 if (cqr->status != DASD_CQR_FILLED)
1796 continue;
1797 /* Non-temporary stop condition will trigger fail fast */
1798 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
1799 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1800 (!dasd_eer_enabled(block->base))) {
1801 cqr->status = DASD_CQR_FAILED;
1802 dasd_schedule_block_bh(block);
1803 continue;
1804 }
1805 /* Don't try to start requests if device is stopped */
1806 if (block->base->stopped)
1807 return;
1808
1809 /* just a fail safe check, should not happen */
1810 if (!cqr->startdev)
1811 cqr->startdev = block->base;
1812
1813 /* make sure that the requests we submit find their way back */
1814 cqr->callback = dasd_return_cqr_cb;
1815
1816 dasd_add_request_tail(cqr);
1817 }
1818}
1819
1820/*
1821 * Central dasd_block layer routine. Takes requests from the generic
1822 * block layer request queue, creates ccw requests, enqueues them on
1823 * a dasd_device and processes ccw requests that have been returned.
1824 */
1825static void dasd_block_tasklet(struct dasd_block *block)
1826{
1827 struct list_head final_queue;
1828 struct list_head *l, *n;
1829 struct dasd_ccw_req *cqr;
1830
1831 atomic_set(&block->tasklet_scheduled, 0);
1832 INIT_LIST_HEAD(&final_queue);
1833 spin_lock(&block->queue_lock);
1834 /* Finish off requests on ccw queue */
1835 __dasd_process_block_ccw_queue(block, &final_queue);
1836 spin_unlock(&block->queue_lock);
1837 /* Now call the callback function of requests with final status */
1838 spin_lock_irq(&block->request_queue_lock);
1839 list_for_each_safe(l, n, &final_queue) {
1840 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1841 list_del_init(&cqr->blocklist);
1842 __dasd_cleanup_cqr(cqr);
1843 }
1844 spin_lock(&block->queue_lock);
1845 /* Get new request from the block device request queue */
1846 __dasd_process_request_queue(block);
1847 /* Now check if the head of the ccw queue needs to be started. */
1848 __dasd_block_start_head(block);
1849 spin_unlock(&block->queue_lock);
1850 spin_unlock_irq(&block->request_queue_lock);
1851 dasd_put_device(block->base);
1852}
1853
1854static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
1855{
1856 wake_up(&dasd_flush_wq);
1857}
1858
1859/*
1860 * Go through all request on the dasd_block request queue, cancel them
1861 * on the respective dasd_device, and return them to the generic
1862 * block layer.
1863 */
1864static int dasd_flush_block_queue(struct dasd_block *block)
1865{
1866 struct dasd_ccw_req *cqr, *n;
1867 int rc, i;
1868 struct list_head flush_queue;
1869
1870 INIT_LIST_HEAD(&flush_queue);
1871 spin_lock_bh(&block->queue_lock);
1872 rc = 0;
1873restart:
1874 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
1875 /* if this request currently owned by a dasd_device cancel it */
1876 if (cqr->status >= DASD_CQR_QUEUED)
1877 rc = dasd_cancel_req(cqr);
1878 if (rc < 0)
1879 break;
1880 /* Rechain request (including erp chain) so it won't be
1881 * touched by the dasd_block_tasklet anymore.
1882 * Replace the callback so we notice when the request
1883 * is returned from the dasd_device layer.
1884 */
1885 cqr->callback = _dasd_wake_block_flush_cb;
1886 for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
1887 list_move_tail(&cqr->blocklist, &flush_queue);
1888 if (i > 1)
1889 /* moved more than one request - need to restart */
1890 goto restart;
1891 }
1892 spin_unlock_bh(&block->queue_lock);
1893 /* Now call the callback function of flushed requests */
1894restart_cb:
1895 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
1896 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
1897 /* Process finished ERP request. */
1898 if (cqr->refers) {
1899 __dasd_block_process_erp(block, cqr);
1900 /* restart list_for_xx loop since dasd_process_erp
1901 * might remove multiple elements */
1902 goto restart_cb;
1903 }
1904 /* call the callback function */
1905 cqr->endclk = get_clock();
1906 list_del_init(&cqr->blocklist);
1907 __dasd_cleanup_cqr(cqr);
1729 } 1908 }
1730 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1731 dasd_schedule_bh(device);
1732 return rc; 1909 return rc;
1733} 1910}
1734 1911
1735/* 1912/*
1736 * SECTION: Block device operations (request queue, partitions, open, release). 1913 * Schedules a call to dasd_tasklet over the device tasklet.
1914 */
1915void dasd_schedule_block_bh(struct dasd_block *block)
1916{
1917 /* Protect against rescheduling. */
1918 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
1919 return;
1920 /* life cycle of block is bound to it's base device */
1921 dasd_get_device(block->base);
1922 tasklet_hi_schedule(&block->tasklet);
1923}
1924
1925
1926/*
1927 * SECTION: external block device operations
1928 * (request queue handling, open, release, etc.)
1737 */ 1929 */
1738 1930
1739/* 1931/*
1740 * Dasd request queue function. Called from ll_rw_blk.c 1932 * Dasd request queue function. Called from ll_rw_blk.c
1741 */ 1933 */
1742static void 1934static void do_dasd_request(struct request_queue *queue)
1743do_dasd_request(struct request_queue * queue)
1744{ 1935{
1745 struct dasd_device *device; 1936 struct dasd_block *block;
1746 1937
1747 device = (struct dasd_device *) queue->queuedata; 1938 block = queue->queuedata;
1748 spin_lock(get_ccwdev_lock(device->cdev)); 1939 spin_lock(&block->queue_lock);
1749 /* Get new request from the block device request queue */ 1940 /* Get new request from the block device request queue */
1750 __dasd_process_blk_queue(device); 1941 __dasd_process_request_queue(block);
1751 /* Now check if the head of the ccw queue needs to be started. */ 1942 /* Now check if the head of the ccw queue needs to be started. */
1752 __dasd_start_head(device); 1943 __dasd_block_start_head(block);
1753 spin_unlock(get_ccwdev_lock(device->cdev)); 1944 spin_unlock(&block->queue_lock);
1754} 1945}
1755 1946
1756/* 1947/*
1757 * Allocate and initialize request queue and default I/O scheduler. 1948 * Allocate and initialize request queue and default I/O scheduler.
1758 */ 1949 */
1759static int 1950static int dasd_alloc_queue(struct dasd_block *block)
1760dasd_alloc_queue(struct dasd_device * device)
1761{ 1951{
1762 int rc; 1952 int rc;
1763 1953
1764 device->request_queue = blk_init_queue(do_dasd_request, 1954 block->request_queue = blk_init_queue(do_dasd_request,
1765 &device->request_queue_lock); 1955 &block->request_queue_lock);
1766 if (device->request_queue == NULL) 1956 if (block->request_queue == NULL)
1767 return -ENOMEM; 1957 return -ENOMEM;
1768 1958
1769 device->request_queue->queuedata = device; 1959 block->request_queue->queuedata = block;
1770 1960
1771 elevator_exit(device->request_queue->elevator); 1961 elevator_exit(block->request_queue->elevator);
1772 rc = elevator_init(device->request_queue, "deadline"); 1962 rc = elevator_init(block->request_queue, "deadline");
1773 if (rc) { 1963 if (rc) {
1774 blk_cleanup_queue(device->request_queue); 1964 blk_cleanup_queue(block->request_queue);
1775 return rc; 1965 return rc;
1776 } 1966 }
1777 return 0; 1967 return 0;
@@ -1780,79 +1970,76 @@ dasd_alloc_queue(struct dasd_device * device)
1780/* 1970/*
1781 * Allocate and initialize request queue. 1971 * Allocate and initialize request queue.
1782 */ 1972 */
1783static void 1973static void dasd_setup_queue(struct dasd_block *block)
1784dasd_setup_queue(struct dasd_device * device)
1785{ 1974{
1786 int max; 1975 int max;
1787 1976
1788 blk_queue_hardsect_size(device->request_queue, device->bp_block); 1977 blk_queue_hardsect_size(block->request_queue, block->bp_block);
1789 max = device->discipline->max_blocks << device->s2b_shift; 1978 max = block->base->discipline->max_blocks << block->s2b_shift;
1790 blk_queue_max_sectors(device->request_queue, max); 1979 blk_queue_max_sectors(block->request_queue, max);
1791 blk_queue_max_phys_segments(device->request_queue, -1L); 1980 blk_queue_max_phys_segments(block->request_queue, -1L);
1792 blk_queue_max_hw_segments(device->request_queue, -1L); 1981 blk_queue_max_hw_segments(block->request_queue, -1L);
1793 blk_queue_max_segment_size(device->request_queue, -1L); 1982 blk_queue_max_segment_size(block->request_queue, -1L);
1794 blk_queue_segment_boundary(device->request_queue, -1L); 1983 blk_queue_segment_boundary(block->request_queue, -1L);
1795 blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL); 1984 blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
1796} 1985}
1797 1986
1798/* 1987/*
1799 * Deactivate and free request queue. 1988 * Deactivate and free request queue.
1800 */ 1989 */
1801static void 1990static void dasd_free_queue(struct dasd_block *block)
1802dasd_free_queue(struct dasd_device * device)
1803{ 1991{
1804 if (device->request_queue) { 1992 if (block->request_queue) {
1805 blk_cleanup_queue(device->request_queue); 1993 blk_cleanup_queue(block->request_queue);
1806 device->request_queue = NULL; 1994 block->request_queue = NULL;
1807 } 1995 }
1808} 1996}
1809 1997
1810/* 1998/*
1811 * Flush request on the request queue. 1999 * Flush request on the request queue.
1812 */ 2000 */
1813static void 2001static void dasd_flush_request_queue(struct dasd_block *block)
1814dasd_flush_request_queue(struct dasd_device * device)
1815{ 2002{
1816 struct request *req; 2003 struct request *req;
1817 2004
1818 if (!device->request_queue) 2005 if (!block->request_queue)
1819 return; 2006 return;
1820 2007
1821 spin_lock_irq(&device->request_queue_lock); 2008 spin_lock_irq(&block->request_queue_lock);
1822 while ((req = elv_next_request(device->request_queue))) { 2009 while ((req = elv_next_request(block->request_queue))) {
1823 blkdev_dequeue_request(req); 2010 blkdev_dequeue_request(req);
1824 dasd_end_request(req, 0); 2011 dasd_end_request(req, 0);
1825 } 2012 }
1826 spin_unlock_irq(&device->request_queue_lock); 2013 spin_unlock_irq(&block->request_queue_lock);
1827} 2014}
1828 2015
1829static int 2016static int dasd_open(struct inode *inp, struct file *filp)
1830dasd_open(struct inode *inp, struct file *filp)
1831{ 2017{
1832 struct gendisk *disk = inp->i_bdev->bd_disk; 2018 struct gendisk *disk = inp->i_bdev->bd_disk;
1833 struct dasd_device *device = disk->private_data; 2019 struct dasd_block *block = disk->private_data;
2020 struct dasd_device *base = block->base;
1834 int rc; 2021 int rc;
1835 2022
1836 atomic_inc(&device->open_count); 2023 atomic_inc(&block->open_count);
1837 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2024 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
1838 rc = -ENODEV; 2025 rc = -ENODEV;
1839 goto unlock; 2026 goto unlock;
1840 } 2027 }
1841 2028
1842 if (!try_module_get(device->discipline->owner)) { 2029 if (!try_module_get(base->discipline->owner)) {
1843 rc = -EINVAL; 2030 rc = -EINVAL;
1844 goto unlock; 2031 goto unlock;
1845 } 2032 }
1846 2033
1847 if (dasd_probeonly) { 2034 if (dasd_probeonly) {
1848 DEV_MESSAGE(KERN_INFO, device, "%s", 2035 DEV_MESSAGE(KERN_INFO, base, "%s",
1849 "No access to device due to probeonly mode"); 2036 "No access to device due to probeonly mode");
1850 rc = -EPERM; 2037 rc = -EPERM;
1851 goto out; 2038 goto out;
1852 } 2039 }
1853 2040
1854 if (device->state <= DASD_STATE_BASIC) { 2041 if (base->state <= DASD_STATE_BASIC) {
1855 DBF_DEV_EVENT(DBF_ERR, device, " %s", 2042 DBF_DEV_EVENT(DBF_ERR, base, " %s",
1856 " Cannot open unrecognized device"); 2043 " Cannot open unrecognized device");
1857 rc = -ENODEV; 2044 rc = -ENODEV;
1858 goto out; 2045 goto out;
@@ -1861,41 +2048,41 @@ dasd_open(struct inode *inp, struct file *filp)
1861 return 0; 2048 return 0;
1862 2049
1863out: 2050out:
1864 module_put(device->discipline->owner); 2051 module_put(base->discipline->owner);
1865unlock: 2052unlock:
1866 atomic_dec(&device->open_count); 2053 atomic_dec(&block->open_count);
1867 return rc; 2054 return rc;
1868} 2055}
1869 2056
1870static int 2057static int dasd_release(struct inode *inp, struct file *filp)
1871dasd_release(struct inode *inp, struct file *filp)
1872{ 2058{
1873 struct gendisk *disk = inp->i_bdev->bd_disk; 2059 struct gendisk *disk = inp->i_bdev->bd_disk;
1874 struct dasd_device *device = disk->private_data; 2060 struct dasd_block *block = disk->private_data;
1875 2061
1876 atomic_dec(&device->open_count); 2062 atomic_dec(&block->open_count);
1877 module_put(device->discipline->owner); 2063 module_put(block->base->discipline->owner);
1878 return 0; 2064 return 0;
1879} 2065}
1880 2066
1881/* 2067/*
1882 * Return disk geometry. 2068 * Return disk geometry.
1883 */ 2069 */
1884static int 2070static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1885dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1886{ 2071{
1887 struct dasd_device *device; 2072 struct dasd_block *block;
2073 struct dasd_device *base;
1888 2074
1889 device = bdev->bd_disk->private_data; 2075 block = bdev->bd_disk->private_data;
1890 if (!device) 2076 base = block->base;
2077 if (!block)
1891 return -ENODEV; 2078 return -ENODEV;
1892 2079
1893 if (!device->discipline || 2080 if (!base->discipline ||
1894 !device->discipline->fill_geometry) 2081 !base->discipline->fill_geometry)
1895 return -EINVAL; 2082 return -EINVAL;
1896 2083
1897 device->discipline->fill_geometry(device, geo); 2084 base->discipline->fill_geometry(block, geo);
1898 geo->start = get_start_sect(bdev) >> device->s2b_shift; 2085 geo->start = get_start_sect(bdev) >> block->s2b_shift;
1899 return 0; 2086 return 0;
1900} 2087}
1901 2088
@@ -1909,6 +2096,9 @@ dasd_device_operations = {
1909 .getgeo = dasd_getgeo, 2096 .getgeo = dasd_getgeo,
1910}; 2097};
1911 2098
2099/*******************************************************************************
2100 * end of block device operations
2101 */
1912 2102
1913static void 2103static void
1914dasd_exit(void) 2104dasd_exit(void)
@@ -1937,9 +2127,8 @@ dasd_exit(void)
1937 * Initial attempt at a probe function. this can be simplified once 2127 * Initial attempt at a probe function. this can be simplified once
1938 * the other detection code is gone. 2128 * the other detection code is gone.
1939 */ 2129 */
1940int 2130int dasd_generic_probe(struct ccw_device *cdev,
1941dasd_generic_probe (struct ccw_device *cdev, 2131 struct dasd_discipline *discipline)
1942 struct dasd_discipline *discipline)
1943{ 2132{
1944 int ret; 2133 int ret;
1945 2134
@@ -1979,10 +2168,10 @@ dasd_generic_probe (struct ccw_device *cdev,
1979 * This will one day be called from a global not_oper handler. 2168 * This will one day be called from a global not_oper handler.
1980 * It is also used by driver_unregister during module unload. 2169 * It is also used by driver_unregister during module unload.
1981 */ 2170 */
1982void 2171void dasd_generic_remove(struct ccw_device *cdev)
1983dasd_generic_remove (struct ccw_device *cdev)
1984{ 2172{
1985 struct dasd_device *device; 2173 struct dasd_device *device;
2174 struct dasd_block *block;
1986 2175
1987 cdev->handler = NULL; 2176 cdev->handler = NULL;
1988 2177
@@ -2002,7 +2191,15 @@ dasd_generic_remove (struct ccw_device *cdev)
2002 */ 2191 */
2003 dasd_set_target_state(device, DASD_STATE_NEW); 2192 dasd_set_target_state(device, DASD_STATE_NEW);
2004 /* dasd_delete_device destroys the device reference. */ 2193 /* dasd_delete_device destroys the device reference. */
2194 block = device->block;
2195 device->block = NULL;
2005 dasd_delete_device(device); 2196 dasd_delete_device(device);
2197 /*
2198 * life cycle of block is bound to device, so delete it after
2199 * device was safely removed
2200 */
2201 if (block)
2202 dasd_free_block(block);
2006} 2203}
2007 2204
2008/* 2205/*
@@ -2010,10 +2207,8 @@ dasd_generic_remove (struct ccw_device *cdev)
2010 * the device is detected for the first time and is supposed to be used 2207 * the device is detected for the first time and is supposed to be used
2011 * or the user has started activation through sysfs. 2208 * or the user has started activation through sysfs.
2012 */ 2209 */
2013int 2210int dasd_generic_set_online(struct ccw_device *cdev,
2014dasd_generic_set_online (struct ccw_device *cdev, 2211 struct dasd_discipline *base_discipline)
2015 struct dasd_discipline *base_discipline)
2016
2017{ 2212{
2018 struct dasd_discipline *discipline; 2213 struct dasd_discipline *discipline;
2019 struct dasd_device *device; 2214 struct dasd_device *device;
@@ -2049,6 +2244,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
2049 device->base_discipline = base_discipline; 2244 device->base_discipline = base_discipline;
2050 device->discipline = discipline; 2245 device->discipline = discipline;
2051 2246
2247 /* check_device will allocate block device if necessary */
2052 rc = discipline->check_device(device); 2248 rc = discipline->check_device(device);
2053 if (rc) { 2249 if (rc) {
2054 printk (KERN_WARNING 2250 printk (KERN_WARNING
@@ -2068,6 +2264,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
2068 cdev->dev.bus_id); 2264 cdev->dev.bus_id);
2069 rc = -ENODEV; 2265 rc = -ENODEV;
2070 dasd_set_target_state(device, DASD_STATE_NEW); 2266 dasd_set_target_state(device, DASD_STATE_NEW);
2267 if (device->block)
2268 dasd_free_block(device->block);
2071 dasd_delete_device(device); 2269 dasd_delete_device(device);
2072 } else 2270 } else
2073 pr_debug("dasd_generic device %s found\n", 2271 pr_debug("dasd_generic device %s found\n",
@@ -2082,10 +2280,10 @@ dasd_generic_set_online (struct ccw_device *cdev,
2082 return rc; 2280 return rc;
2083} 2281}
2084 2282
2085int 2283int dasd_generic_set_offline(struct ccw_device *cdev)
2086dasd_generic_set_offline (struct ccw_device *cdev)
2087{ 2284{
2088 struct dasd_device *device; 2285 struct dasd_device *device;
2286 struct dasd_block *block;
2089 int max_count, open_count; 2287 int max_count, open_count;
2090 2288
2091 device = dasd_device_from_cdev(cdev); 2289 device = dasd_device_from_cdev(cdev);
@@ -2102,30 +2300,39 @@ dasd_generic_set_offline (struct ccw_device *cdev)
2102 * the blkdev_get in dasd_scan_partitions. We are only interested 2300 * the blkdev_get in dasd_scan_partitions. We are only interested
2103 * in the other openers. 2301 * in the other openers.
2104 */ 2302 */
2105 max_count = device->bdev ? 0 : -1; 2303 if (device->block) {
2106 open_count = (int) atomic_read(&device->open_count); 2304 struct dasd_block *block = device->block;
2107 if (open_count > max_count) { 2305 max_count = block->bdev ? 0 : -1;
2108 if (open_count > 0) 2306 open_count = (int) atomic_read(&block->open_count);
2109 printk (KERN_WARNING "Can't offline dasd device with " 2307 if (open_count > max_count) {
2110 "open count = %i.\n", 2308 if (open_count > 0)
2111 open_count); 2309 printk(KERN_WARNING "Can't offline dasd "
2112 else 2310 "device with open count = %i.\n",
2113 printk (KERN_WARNING "%s", 2311 open_count);
2114 "Can't offline dasd device due to internal " 2312 else
2115 "use\n"); 2313 printk(KERN_WARNING "%s",
2116 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2314 "Can't offline dasd device due "
2117 dasd_put_device(device); 2315 "to internal use\n");
2118 return -EBUSY; 2316 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2317 dasd_put_device(device);
2318 return -EBUSY;
2319 }
2119 } 2320 }
2120 dasd_set_target_state(device, DASD_STATE_NEW); 2321 dasd_set_target_state(device, DASD_STATE_NEW);
2121 /* dasd_delete_device destroys the device reference. */ 2322 /* dasd_delete_device destroys the device reference. */
2323 block = device->block;
2324 device->block = NULL;
2122 dasd_delete_device(device); 2325 dasd_delete_device(device);
2123 2326 /*
2327 * life cycle of block is bound to device, so delete it after
2328 * device was safely removed
2329 */
2330 if (block)
2331 dasd_free_block(block);
2124 return 0; 2332 return 0;
2125} 2333}
2126 2334
2127int 2335int dasd_generic_notify(struct ccw_device *cdev, int event)
2128dasd_generic_notify(struct ccw_device *cdev, int event)
2129{ 2336{
2130 struct dasd_device *device; 2337 struct dasd_device *device;
2131 struct dasd_ccw_req *cqr; 2338 struct dasd_ccw_req *cqr;
@@ -2146,27 +2353,22 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
2146 if (device->state < DASD_STATE_BASIC) 2353 if (device->state < DASD_STATE_BASIC)
2147 break; 2354 break;
2148 /* Device is active. We want to keep it. */ 2355 /* Device is active. We want to keep it. */
2149 if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) { 2356 list_for_each_entry(cqr, &device->ccw_queue, devlist)
2150 list_for_each_entry(cqr, &device->ccw_queue, list) 2357 if (cqr->status == DASD_CQR_IN_IO) {
2151 if (cqr->status == DASD_CQR_IN_IO) 2358 cqr->status = DASD_CQR_QUEUED;
2152 cqr->status = DASD_CQR_FAILED; 2359 cqr->retries++;
2153 device->stopped |= DASD_STOPPED_DC_EIO; 2360 }
2154 } else { 2361 device->stopped |= DASD_STOPPED_DC_WAIT;
2155 list_for_each_entry(cqr, &device->ccw_queue, list) 2362 dasd_device_clear_timer(device);
2156 if (cqr->status == DASD_CQR_IN_IO) { 2363 dasd_schedule_device_bh(device);
2157 cqr->status = DASD_CQR_QUEUED;
2158 cqr->retries++;
2159 }
2160 device->stopped |= DASD_STOPPED_DC_WAIT;
2161 dasd_set_timer(device, 0);
2162 }
2163 dasd_schedule_bh(device);
2164 ret = 1; 2364 ret = 1;
2165 break; 2365 break;
2166 case CIO_OPER: 2366 case CIO_OPER:
2167 /* FIXME: add a sanity check. */ 2367 /* FIXME: add a sanity check. */
2168 device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO); 2368 device->stopped &= ~DASD_STOPPED_DC_WAIT;
2169 dasd_schedule_bh(device); 2369 dasd_schedule_device_bh(device);
2370 if (device->block)
2371 dasd_schedule_block_bh(device->block);
2170 ret = 1; 2372 ret = 1;
2171 break; 2373 break;
2172 } 2374 }
@@ -2196,7 +2398,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2196 ccw->cda = (__u32)(addr_t)rdc_buffer; 2398 ccw->cda = (__u32)(addr_t)rdc_buffer;
2197 ccw->count = rdc_buffer_size; 2399 ccw->count = rdc_buffer_size;
2198 2400
2199 cqr->device = device; 2401 cqr->startdev = device;
2402 cqr->memdev = device;
2200 cqr->expires = 10*HZ; 2403 cqr->expires = 10*HZ;
2201 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2404 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2202 cqr->retries = 2; 2405 cqr->retries = 2;
@@ -2218,13 +2421,12 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2218 return PTR_ERR(cqr); 2421 return PTR_ERR(cqr);
2219 2422
2220 ret = dasd_sleep_on(cqr); 2423 ret = dasd_sleep_on(cqr);
2221 dasd_sfree_request(cqr, cqr->device); 2424 dasd_sfree_request(cqr, cqr->memdev);
2222 return ret; 2425 return ret;
2223} 2426}
2224EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2427EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2225 2428
2226static int __init 2429static int __init dasd_init(void)
2227dasd_init(void)
2228{ 2430{
2229 int rc; 2431 int rc;
2230 2432
@@ -2232,7 +2434,7 @@ dasd_init(void)
2232 init_waitqueue_head(&dasd_flush_wq); 2434 init_waitqueue_head(&dasd_flush_wq);
2233 2435
2234 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2436 /* register 'common' DASD debug area, used for all DBF_XXX calls */
2235 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); 2437 dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long));
2236 if (dasd_debug_area == NULL) { 2438 if (dasd_debug_area == NULL) {
2237 rc = -ENOMEM; 2439 rc = -ENOMEM;
2238 goto failed; 2440 goto failed;
@@ -2278,15 +2480,18 @@ EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2278EXPORT_SYMBOL(dasd_add_request_head); 2480EXPORT_SYMBOL(dasd_add_request_head);
2279EXPORT_SYMBOL(dasd_add_request_tail); 2481EXPORT_SYMBOL(dasd_add_request_tail);
2280EXPORT_SYMBOL(dasd_cancel_req); 2482EXPORT_SYMBOL(dasd_cancel_req);
2281EXPORT_SYMBOL(dasd_clear_timer); 2483EXPORT_SYMBOL(dasd_device_clear_timer);
2484EXPORT_SYMBOL(dasd_block_clear_timer);
2282EXPORT_SYMBOL(dasd_enable_device); 2485EXPORT_SYMBOL(dasd_enable_device);
2283EXPORT_SYMBOL(dasd_int_handler); 2486EXPORT_SYMBOL(dasd_int_handler);
2284EXPORT_SYMBOL(dasd_kfree_request); 2487EXPORT_SYMBOL(dasd_kfree_request);
2285EXPORT_SYMBOL(dasd_kick_device); 2488EXPORT_SYMBOL(dasd_kick_device);
2286EXPORT_SYMBOL(dasd_kmalloc_request); 2489EXPORT_SYMBOL(dasd_kmalloc_request);
2287EXPORT_SYMBOL(dasd_schedule_bh); 2490EXPORT_SYMBOL(dasd_schedule_device_bh);
2491EXPORT_SYMBOL(dasd_schedule_block_bh);
2288EXPORT_SYMBOL(dasd_set_target_state); 2492EXPORT_SYMBOL(dasd_set_target_state);
2289EXPORT_SYMBOL(dasd_set_timer); 2493EXPORT_SYMBOL(dasd_device_set_timer);
2494EXPORT_SYMBOL(dasd_block_set_timer);
2290EXPORT_SYMBOL(dasd_sfree_request); 2495EXPORT_SYMBOL(dasd_sfree_request);
2291EXPORT_SYMBOL(dasd_sleep_on); 2496EXPORT_SYMBOL(dasd_sleep_on);
2292EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2497EXPORT_SYMBOL(dasd_sleep_on_immediatly);
@@ -2300,4 +2505,7 @@ EXPORT_SYMBOL_GPL(dasd_generic_remove);
2300EXPORT_SYMBOL_GPL(dasd_generic_notify); 2505EXPORT_SYMBOL_GPL(dasd_generic_notify);
2301EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2506EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2302EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2507EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2303 2508EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2509EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2510EXPORT_SYMBOL_GPL(dasd_alloc_block);
2511EXPORT_SYMBOL_GPL(dasd_free_block);
diff --git a/drivers/s390/block/dasd_3370_erp.c b/drivers/s390/block/dasd_3370_erp.c
deleted file mode 100644
index 1ddab8991d92..000000000000
--- a/drivers/s390/block/dasd_3370_erp.c
+++ /dev/null
@@ -1,84 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_3370_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(3370)"
10
11#include "dasd_int.h"
12
13
14/*
15 * DASD_3370_ERP_EXAMINE
16 *
17 * DESCRIPTION
18 * Checks only for fatal/no/recover error.
19 * A detailed examination of the sense data is done later outside
20 * the interrupt handler.
21 *
22 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
23 * 'Chapter 7. 3370 Sense Data'.
24 *
25 * RETURN VALUES
26 * dasd_era_none no error
27 * dasd_era_fatal for all fatal (unrecoverable errors)
28 * dasd_era_recover for all others.
29 */
30dasd_era_t
31dasd_3370_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
32{
33 char *sense = irb->ecw;
34
35 /* check for successful execution first */
36 if (irb->scsw.cstat == 0x00 &&
37 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
38 return dasd_era_none;
39 if (sense[0] & 0x80) { /* CMD reject */
40 return dasd_era_fatal;
41 }
42 if (sense[0] & 0x40) { /* Drive offline */
43 return dasd_era_recover;
44 }
45 if (sense[0] & 0x20) { /* Bus out parity */
46 return dasd_era_recover;
47 }
48 if (sense[0] & 0x10) { /* equipment check */
49 if (sense[1] & 0x80) {
50 return dasd_era_fatal;
51 }
52 return dasd_era_recover;
53 }
54 if (sense[0] & 0x08) { /* data check */
55 if (sense[1] & 0x80) {
56 return dasd_era_fatal;
57 }
58 return dasd_era_recover;
59 }
60 if (sense[0] & 0x04) { /* overrun */
61 if (sense[1] & 0x80) {
62 return dasd_era_fatal;
63 }
64 return dasd_era_recover;
65 }
66 if (sense[1] & 0x40) { /* invalid blocksize */
67 return dasd_era_fatal;
68 }
69 if (sense[1] & 0x04) { /* file protected */
70 return dasd_era_recover;
71 }
72 if (sense[1] & 0x01) { /* operation incomplete */
73 return dasd_era_recover;
74 }
75 if (sense[2] & 0x80) { /* check data erroor */
76 return dasd_era_recover;
77 }
78 if (sense[2] & 0x10) { /* Env. data present */
79 return dasd_era_recover;
80 }
81 /* examine the 24 byte sense data */
82 return dasd_era_recover;
83
84} /* END dasd_3370_erp_examine */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c02f960eae15..c361ab69ec00 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -26,158 +26,6 @@ struct DCTL_data {
26 26
27/* 27/*
28 ***************************************************************************** 28 *****************************************************************************
29 * SECTION ERP EXAMINATION
30 *****************************************************************************
31 */
32
33/*
34 * DASD_3990_ERP_EXAMINE_24
35 *
36 * DESCRIPTION
37 * Checks only for fatal (unrecoverable) error.
38 * A detailed examination of the sense data is done later outside
39 * the interrupt handler.
40 *
41 * Each bit configuration leading to an action code 2 (Exit with
42 * programming error or unusual condition indication)
43 * are handled as fatal errors.
44 *
45 * All other configurations are handled as recoverable errors.
46 *
47 * RETURN VALUES
48 * dasd_era_fatal for all fatal (unrecoverable errors)
49 * dasd_era_recover for all others.
50 */
51static dasd_era_t
52dasd_3990_erp_examine_24(struct dasd_ccw_req * cqr, char *sense)
53{
54
55 struct dasd_device *device = cqr->device;
56
57 /* check for 'Command Reject' */
58 if ((sense[0] & SNS0_CMD_REJECT) &&
59 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
60
61 DEV_MESSAGE(KERN_ERR, device, "%s",
62 "EXAMINE 24: Command Reject detected - "
63 "fatal error");
64
65 return dasd_era_fatal;
66 }
67
68 /* check for 'Invalid Track Format' */
69 if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
70 (!(sense[2] & SNS2_ENV_DATA_PRESENT))) {
71
72 DEV_MESSAGE(KERN_ERR, device, "%s",
73 "EXAMINE 24: Invalid Track Format detected "
74 "- fatal error");
75
76 return dasd_era_fatal;
77 }
78
79 /* check for 'No Record Found' */
80 if (sense[1] & SNS1_NO_REC_FOUND) {
81
82 /* FIXME: fatal error ?!? */
83 DEV_MESSAGE(KERN_ERR, device,
84 "EXAMINE 24: No Record Found detected %s",
85 device->state <= DASD_STATE_BASIC ?
86 " " : "- fatal error");
87
88 return dasd_era_fatal;
89 }
90
91 /* return recoverable for all others */
92 return dasd_era_recover;
93} /* END dasd_3990_erp_examine_24 */
94
95/*
96 * DASD_3990_ERP_EXAMINE_32
97 *
98 * DESCRIPTION
99 * Checks only for fatal/no/recoverable error.
100 * A detailed examination of the sense data is done later outside
101 * the interrupt handler.
102 *
103 * RETURN VALUES
104 * dasd_era_none no error
105 * dasd_era_fatal for all fatal (unrecoverable errors)
106 * dasd_era_recover for recoverable others.
107 */
108static dasd_era_t
109dasd_3990_erp_examine_32(struct dasd_ccw_req * cqr, char *sense)
110{
111
112 struct dasd_device *device = cqr->device;
113
114 switch (sense[25]) {
115 case 0x00:
116 return dasd_era_none;
117
118 case 0x01:
119 DEV_MESSAGE(KERN_ERR, device, "%s", "EXAMINE 32: fatal error");
120
121 return dasd_era_fatal;
122
123 default:
124
125 return dasd_era_recover;
126 }
127
128} /* end dasd_3990_erp_examine_32 */
129
130/*
131 * DASD_3990_ERP_EXAMINE
132 *
133 * DESCRIPTION
134 * Checks only for fatal/no/recover error.
135 * A detailed examination of the sense data is done later outside
136 * the interrupt handler.
137 *
138 * The logic is based on the 'IBM 3990 Storage Control Reference' manual
139 * 'Chapter 7. Error Recovery Procedures'.
140 *
141 * RETURN VALUES
142 * dasd_era_none no error
143 * dasd_era_fatal for all fatal (unrecoverable errors)
144 * dasd_era_recover for all others.
145 */
146dasd_era_t
147dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
148{
149
150 char *sense = irb->ecw;
151 dasd_era_t era = dasd_era_recover;
152 struct dasd_device *device = cqr->device;
153
154 /* check for successful execution first */
155 if (irb->scsw.cstat == 0x00 &&
156 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
157 return dasd_era_none;
158
159 /* distinguish between 24 and 32 byte sense data */
160 if (sense[27] & DASD_SENSE_BIT_0) {
161
162 era = dasd_3990_erp_examine_24(cqr, sense);
163
164 } else {
165
166 era = dasd_3990_erp_examine_32(cqr, sense);
167
168 }
169
170 /* log the erp chain if fatal error occurred */
171 if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) {
172 dasd_log_sense(cqr, irb);
173 }
174
175 return era;
176
177} /* END dasd_3990_erp_examine */
178
179/*
180 *****************************************************************************
181 * SECTION ERP HANDLING 29 * SECTION ERP HANDLING
182 ***************************************************************************** 30 *****************************************************************************
183 */ 31 */
@@ -206,7 +54,7 @@ dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
206{ 54{
207 struct dasd_ccw_req *cqr = erp->refers; 55 struct dasd_ccw_req *cqr = erp->refers;
208 56
209 dasd_free_erp_request(erp, erp->device); 57 dasd_free_erp_request(erp, erp->memdev);
210 cqr->status = final_status; 58 cqr->status = final_status;
211 return cqr; 59 return cqr;
212 60
@@ -224,15 +72,17 @@ static void
224dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires) 72dasd_3990_erp_block_queue(struct dasd_ccw_req * erp, int expires)
225{ 73{
226 74
227 struct dasd_device *device = erp->device; 75 struct dasd_device *device = erp->startdev;
76 unsigned long flags;
228 77
229 DEV_MESSAGE(KERN_INFO, device, 78 DEV_MESSAGE(KERN_INFO, device,
230 "blocking request queue for %is", expires/HZ); 79 "blocking request queue for %is", expires/HZ);
231 80
81 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
232 device->stopped |= DASD_STOPPED_PENDING; 82 device->stopped |= DASD_STOPPED_PENDING;
233 erp->status = DASD_CQR_QUEUED; 83 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
234 84 erp->status = DASD_CQR_FILLED;
235 dasd_set_timer(device, expires); 85 dasd_block_set_timer(device->block, expires);
236} 86}
237 87
238/* 88/*
@@ -251,7 +101,7 @@ static struct dasd_ccw_req *
251dasd_3990_erp_int_req(struct dasd_ccw_req * erp) 101dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
252{ 102{
253 103
254 struct dasd_device *device = erp->device; 104 struct dasd_device *device = erp->startdev;
255 105
256 /* first time set initial retry counter and erp_function */ 106 /* first time set initial retry counter and erp_function */
257 /* and retry once without blocking queue */ 107 /* and retry once without blocking queue */
@@ -292,11 +142,14 @@ dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
292static void 142static void
293dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp) 143dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
294{ 144{
295 struct dasd_device *device = erp->device; 145 struct dasd_device *device = erp->startdev;
296 __u8 opm; 146 __u8 opm;
147 unsigned long flags;
297 148
298 /* try alternate valid path */ 149 /* try alternate valid path */
150 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
299 opm = ccw_device_get_path_mask(device->cdev); 151 opm = ccw_device_get_path_mask(device->cdev);
152 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
300 //FIXME: start with get_opm ? 153 //FIXME: start with get_opm ?
301 if (erp->lpm == 0) 154 if (erp->lpm == 0)
302 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum); 155 erp->lpm = LPM_ANYPATH & ~(erp->irb.esw.esw0.sublog.lpum);
@@ -309,9 +162,8 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
309 "try alternate lpm=%x (lpum=%x / opm=%x)", 162 "try alternate lpm=%x (lpum=%x / opm=%x)",
310 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm); 163 erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
311 164
312 /* reset status to queued to handle the request again... */ 165 /* reset status to submit the request again... */
313 if (erp->status > DASD_CQR_QUEUED) 166 erp->status = DASD_CQR_FILLED;
314 erp->status = DASD_CQR_QUEUED;
315 erp->retries = 1; 167 erp->retries = 1;
316 } else { 168 } else {
317 DEV_MESSAGE(KERN_ERR, device, 169 DEV_MESSAGE(KERN_ERR, device,
@@ -320,8 +172,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
320 erp->irb.esw.esw0.sublog.lpum, opm); 172 erp->irb.esw.esw0.sublog.lpum, opm);
321 173
322 /* post request with permanent error */ 174 /* post request with permanent error */
323 if (erp->status > DASD_CQR_QUEUED) 175 erp->status = DASD_CQR_FAILED;
324 erp->status = DASD_CQR_FAILED;
325 } 176 }
326} /* end dasd_3990_erp_alternate_path */ 177} /* end dasd_3990_erp_alternate_path */
327 178
@@ -344,14 +195,14 @@ static struct dasd_ccw_req *
344dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier) 195dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
345{ 196{
346 197
347 struct dasd_device *device = erp->device; 198 struct dasd_device *device = erp->startdev;
348 struct DCTL_data *DCTL_data; 199 struct DCTL_data *DCTL_data;
349 struct ccw1 *ccw; 200 struct ccw1 *ccw;
350 struct dasd_ccw_req *dctl_cqr; 201 struct dasd_ccw_req *dctl_cqr;
351 202
352 dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1, 203 dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
353 sizeof (struct DCTL_data), 204 sizeof(struct DCTL_data),
354 erp->device); 205 device);
355 if (IS_ERR(dctl_cqr)) { 206 if (IS_ERR(dctl_cqr)) {
356 DEV_MESSAGE(KERN_ERR, device, "%s", 207 DEV_MESSAGE(KERN_ERR, device, "%s",
357 "Unable to allocate DCTL-CQR"); 208 "Unable to allocate DCTL-CQR");
@@ -365,13 +216,14 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
365 DCTL_data->modifier = modifier; 216 DCTL_data->modifier = modifier;
366 217
367 ccw = dctl_cqr->cpaddr; 218 ccw = dctl_cqr->cpaddr;
368 memset(ccw, 0, sizeof (struct ccw1)); 219 memset(ccw, 0, sizeof(struct ccw1));
369 ccw->cmd_code = CCW_CMD_DCTL; 220 ccw->cmd_code = CCW_CMD_DCTL;
370 ccw->count = 4; 221 ccw->count = 4;
371 ccw->cda = (__u32)(addr_t) DCTL_data; 222 ccw->cda = (__u32)(addr_t) DCTL_data;
372 dctl_cqr->function = dasd_3990_erp_DCTL; 223 dctl_cqr->function = dasd_3990_erp_DCTL;
373 dctl_cqr->refers = erp; 224 dctl_cqr->refers = erp;
374 dctl_cqr->device = erp->device; 225 dctl_cqr->startdev = device;
226 dctl_cqr->memdev = device;
375 dctl_cqr->magic = erp->magic; 227 dctl_cqr->magic = erp->magic;
376 dctl_cqr->expires = 5 * 60 * HZ; 228 dctl_cqr->expires = 5 * 60 * HZ;
377 dctl_cqr->retries = 2; 229 dctl_cqr->retries = 2;
@@ -435,7 +287,7 @@ static struct dasd_ccw_req *
435dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense) 287dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
436{ 288{
437 289
438 struct dasd_device *device = erp->device; 290 struct dasd_device *device = erp->startdev;
439 291
440 /* first time set initial retry counter and erp_function */ 292 /* first time set initial retry counter and erp_function */
441 /* and retry once without waiting for state change pending */ 293 /* and retry once without waiting for state change pending */
@@ -472,7 +324,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
472 "redriving request immediately, " 324 "redriving request immediately, "
473 "%d retries left", 325 "%d retries left",
474 erp->retries); 326 erp->retries);
475 erp->status = DASD_CQR_QUEUED; 327 erp->status = DASD_CQR_FILLED;
476 } 328 }
477 } 329 }
478 330
@@ -530,7 +382,7 @@ static void
530dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) 382dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
531{ 383{
532 384
533 struct dasd_device *device = erp->device; 385 struct dasd_device *device = erp->startdev;
534 char msg_format = (sense[7] & 0xF0); 386 char msg_format = (sense[7] & 0xF0);
535 char msg_no = (sense[7] & 0x0F); 387 char msg_no = (sense[7] & 0x0F);
536 388
@@ -1157,7 +1009,7 @@ static struct dasd_ccw_req *
1157dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) 1009dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
1158{ 1010{
1159 1011
1160 struct dasd_device *device = erp->device; 1012 struct dasd_device *device = erp->startdev;
1161 1013
1162 erp->function = dasd_3990_erp_com_rej; 1014 erp->function = dasd_3990_erp_com_rej;
1163 1015
@@ -1198,7 +1050,7 @@ static struct dasd_ccw_req *
1198dasd_3990_erp_bus_out(struct dasd_ccw_req * erp) 1050dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
1199{ 1051{
1200 1052
1201 struct dasd_device *device = erp->device; 1053 struct dasd_device *device = erp->startdev;
1202 1054
1203 /* first time set initial retry counter and erp_function */ 1055 /* first time set initial retry counter and erp_function */
1204 /* and retry once without blocking queue */ 1056 /* and retry once without blocking queue */
@@ -1237,7 +1089,7 @@ static struct dasd_ccw_req *
1237dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense) 1089dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1238{ 1090{
1239 1091
1240 struct dasd_device *device = erp->device; 1092 struct dasd_device *device = erp->startdev;
1241 1093
1242 erp->function = dasd_3990_erp_equip_check; 1094 erp->function = dasd_3990_erp_equip_check;
1243 1095
@@ -1279,7 +1131,6 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
1279 1131
1280 erp = dasd_3990_erp_action_5(erp); 1132 erp = dasd_3990_erp_action_5(erp);
1281 } 1133 }
1282
1283 return erp; 1134 return erp;
1284 1135
1285} /* end dasd_3990_erp_equip_check */ 1136} /* end dasd_3990_erp_equip_check */
@@ -1299,7 +1150,7 @@ static struct dasd_ccw_req *
1299dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense) 1150dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
1300{ 1151{
1301 1152
1302 struct dasd_device *device = erp->device; 1153 struct dasd_device *device = erp->startdev;
1303 1154
1304 erp->function = dasd_3990_erp_data_check; 1155 erp->function = dasd_3990_erp_data_check;
1305 1156
@@ -1358,7 +1209,7 @@ static struct dasd_ccw_req *
1358dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense) 1209dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
1359{ 1210{
1360 1211
1361 struct dasd_device *device = erp->device; 1212 struct dasd_device *device = erp->startdev;
1362 1213
1363 erp->function = dasd_3990_erp_overrun; 1214 erp->function = dasd_3990_erp_overrun;
1364 1215
@@ -1387,7 +1238,7 @@ static struct dasd_ccw_req *
1387dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense) 1238dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1388{ 1239{
1389 1240
1390 struct dasd_device *device = erp->device; 1241 struct dasd_device *device = erp->startdev;
1391 1242
1392 erp->function = dasd_3990_erp_inv_format; 1243 erp->function = dasd_3990_erp_inv_format;
1393 1244
@@ -1403,8 +1254,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
1403 1254
1404 } else { 1255 } else {
1405 DEV_MESSAGE(KERN_ERR, device, "%s", 1256 DEV_MESSAGE(KERN_ERR, device, "%s",
1406 "Invalid Track Format - Fatal error should have " 1257 "Invalid Track Format - Fatal error");
1407 "been handled within the interrupt handler");
1408 1258
1409 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 1259 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
1410 } 1260 }
@@ -1428,7 +1278,7 @@ static struct dasd_ccw_req *
1428dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense) 1278dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
1429{ 1279{
1430 1280
1431 struct dasd_device *device = default_erp->device; 1281 struct dasd_device *device = default_erp->startdev;
1432 1282
1433 DEV_MESSAGE(KERN_ERR, device, "%s", 1283 DEV_MESSAGE(KERN_ERR, device, "%s",
1434 "End-of-Cylinder - must never happen"); 1284 "End-of-Cylinder - must never happen");
@@ -1453,7 +1303,7 @@ static struct dasd_ccw_req *
1453dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense) 1303dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1454{ 1304{
1455 1305
1456 struct dasd_device *device = erp->device; 1306 struct dasd_device *device = erp->startdev;
1457 1307
1458 erp->function = dasd_3990_erp_env_data; 1308 erp->function = dasd_3990_erp_env_data;
1459 1309
@@ -1463,11 +1313,9 @@ dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
1463 1313
1464 /* don't retry on disabled interface */ 1314 /* don't retry on disabled interface */
1465 if (sense[7] != 0x0F) { 1315 if (sense[7] != 0x0F) {
1466
1467 erp = dasd_3990_erp_action_4(erp, sense); 1316 erp = dasd_3990_erp_action_4(erp, sense);
1468 } else { 1317 } else {
1469 1318 erp->status = DASD_CQR_FILLED;
1470 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_IN_IO);
1471 } 1319 }
1472 1320
1473 return erp; 1321 return erp;
@@ -1490,11 +1338,10 @@ static struct dasd_ccw_req *
1490dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense) 1338dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
1491{ 1339{
1492 1340
1493 struct dasd_device *device = default_erp->device; 1341 struct dasd_device *device = default_erp->startdev;
1494 1342
1495 DEV_MESSAGE(KERN_ERR, device, "%s", 1343 DEV_MESSAGE(KERN_ERR, device, "%s",
1496 "No Record Found - Fatal error should " 1344 "No Record Found - Fatal error ");
1497 "have been handled within the interrupt handler");
1498 1345
1499 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED); 1346 return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
1500 1347
@@ -1517,7 +1364,7 @@ static struct dasd_ccw_req *
1517dasd_3990_erp_file_prot(struct dasd_ccw_req * erp) 1364dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1518{ 1365{
1519 1366
1520 struct dasd_device *device = erp->device; 1367 struct dasd_device *device = erp->startdev;
1521 1368
1522 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected"); 1369 DEV_MESSAGE(KERN_ERR, device, "%s", "File Protected");
1523 1370
@@ -1526,6 +1373,43 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
1526} /* end dasd_3990_erp_file_prot */ 1373} /* end dasd_3990_erp_file_prot */
1527 1374
1528/* 1375/*
1376 * DASD_3990_ERP_INSPECT_ALIAS
1377 *
1378 * DESCRIPTION
1379 * Checks if the original request was started on an alias device.
1380 * If yes, it modifies the original and the erp request so that
1381 * the erp request can be started on a base device.
1382 *
1383 * PARAMETER
1384 * erp pointer to the currently created default ERP
1385 *
1386 * RETURN VALUES
1387 * erp pointer to the modified ERP, or NULL
1388 */
1389
1390static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
1391 struct dasd_ccw_req *erp)
1392{
1393 struct dasd_ccw_req *cqr = erp->refers;
1394
1395 if (cqr->block &&
1396 (cqr->block->base != cqr->startdev)) {
1397 if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
1398 DEV_MESSAGE(KERN_ERR, cqr->startdev,
1399 "ERP on alias device for request %p,"
1400 " recover on base device %s", cqr,
1401 cqr->block->base->cdev->dev.bus_id);
1402 }
1403 dasd_eckd_reset_ccw_to_base_io(cqr);
1404 erp->startdev = cqr->block->base;
1405 erp->function = dasd_3990_erp_inspect_alias;
1406 return erp;
1407 } else
1408 return NULL;
1409}
1410
1411
1412/*
1529 * DASD_3990_ERP_INSPECT_24 1413 * DASD_3990_ERP_INSPECT_24
1530 * 1414 *
1531 * DESCRIPTION 1415 * DESCRIPTION
@@ -1623,7 +1507,7 @@ static struct dasd_ccw_req *
1623dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense) 1507dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
1624{ 1508{
1625 1509
1626 struct dasd_device *device = erp->device; 1510 struct dasd_device *device = erp->startdev;
1627 1511
1628 erp->retries = 256; 1512 erp->retries = 256;
1629 erp->function = dasd_3990_erp_action_10_32; 1513 erp->function = dasd_3990_erp_action_10_32;
@@ -1657,13 +1541,14 @@ static struct dasd_ccw_req *
1657dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense) 1541dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1658{ 1542{
1659 1543
1660 struct dasd_device *device = default_erp->device; 1544 struct dasd_device *device = default_erp->startdev;
1661 __u32 cpa = 0; 1545 __u32 cpa = 0;
1662 struct dasd_ccw_req *cqr; 1546 struct dasd_ccw_req *cqr;
1663 struct dasd_ccw_req *erp; 1547 struct dasd_ccw_req *erp;
1664 struct DE_eckd_data *DE_data; 1548 struct DE_eckd_data *DE_data;
1549 struct PFX_eckd_data *PFX_data;
1665 char *LO_data; /* LO_eckd_data_t */ 1550 char *LO_data; /* LO_eckd_data_t */
1666 struct ccw1 *ccw; 1551 struct ccw1 *ccw, *oldccw;
1667 1552
1668 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1553 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1669 "Write not finished because of unexpected condition"); 1554 "Write not finished because of unexpected condition");
@@ -1702,8 +1587,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1702 /* Build new ERP request including DE/LO */ 1587 /* Build new ERP request including DE/LO */
1703 erp = dasd_alloc_erp_request((char *) &cqr->magic, 1588 erp = dasd_alloc_erp_request((char *) &cqr->magic,
1704 2 + 1,/* DE/LO + TIC */ 1589 2 + 1,/* DE/LO + TIC */
1705 sizeof (struct DE_eckd_data) + 1590 sizeof(struct DE_eckd_data) +
1706 sizeof (struct LO_eckd_data), device); 1591 sizeof(struct LO_eckd_data), device);
1707 1592
1708 if (IS_ERR(erp)) { 1593 if (IS_ERR(erp)) {
1709 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP"); 1594 DEV_MESSAGE(KERN_ERR, device, "%s", "Unable to allocate ERP");
@@ -1712,10 +1597,16 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1712 1597
1713 /* use original DE */ 1598 /* use original DE */
1714 DE_data = erp->data; 1599 DE_data = erp->data;
1715 memcpy(DE_data, cqr->data, sizeof (struct DE_eckd_data)); 1600 oldccw = cqr->cpaddr;
1601 if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
1602 PFX_data = cqr->data;
1603 memcpy(DE_data, &PFX_data->define_extend,
1604 sizeof(struct DE_eckd_data));
1605 } else
1606 memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
1716 1607
1717 /* create LO */ 1608 /* create LO */
1718 LO_data = erp->data + sizeof (struct DE_eckd_data); 1609 LO_data = erp->data + sizeof(struct DE_eckd_data);
1719 1610
1720 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1611 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1721 1612
@@ -1748,7 +1639,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1748 1639
1749 /* create DE ccw */ 1640 /* create DE ccw */
1750 ccw = erp->cpaddr; 1641 ccw = erp->cpaddr;
1751 memset(ccw, 0, sizeof (struct ccw1)); 1642 memset(ccw, 0, sizeof(struct ccw1));
1752 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; 1643 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
1753 ccw->flags = CCW_FLAG_CC; 1644 ccw->flags = CCW_FLAG_CC;
1754 ccw->count = 16; 1645 ccw->count = 16;
@@ -1756,7 +1647,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1756 1647
1757 /* create LO ccw */ 1648 /* create LO ccw */
1758 ccw++; 1649 ccw++;
1759 memset(ccw, 0, sizeof (struct ccw1)); 1650 memset(ccw, 0, sizeof(struct ccw1));
1760 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; 1651 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
1761 ccw->flags = CCW_FLAG_CC; 1652 ccw->flags = CCW_FLAG_CC;
1762 ccw->count = 16; 1653 ccw->count = 16;
@@ -1770,7 +1661,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
1770 /* fill erp related fields */ 1661 /* fill erp related fields */
1771 erp->function = dasd_3990_erp_action_1B_32; 1662 erp->function = dasd_3990_erp_action_1B_32;
1772 erp->refers = default_erp->refers; 1663 erp->refers = default_erp->refers;
1773 erp->device = device; 1664 erp->startdev = device;
1665 erp->memdev = device;
1774 erp->magic = default_erp->magic; 1666 erp->magic = default_erp->magic;
1775 erp->expires = 0; 1667 erp->expires = 0;
1776 erp->retries = 256; 1668 erp->retries = 256;
@@ -1803,7 +1695,7 @@ static struct dasd_ccw_req *
1803dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense) 1695dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1804{ 1696{
1805 1697
1806 struct dasd_device *device = previous_erp->device; 1698 struct dasd_device *device = previous_erp->startdev;
1807 __u32 cpa = 0; 1699 __u32 cpa = 0;
1808 struct dasd_ccw_req *cqr; 1700 struct dasd_ccw_req *cqr;
1809 struct dasd_ccw_req *erp; 1701 struct dasd_ccw_req *erp;
@@ -1827,7 +1719,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1827 DEV_MESSAGE(KERN_DEBUG, device, "%s", 1719 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1828 "Imprecise ending is set - just retry"); 1720 "Imprecise ending is set - just retry");
1829 1721
1830 previous_erp->status = DASD_CQR_QUEUED; 1722 previous_erp->status = DASD_CQR_FILLED;
1831 1723
1832 return previous_erp; 1724 return previous_erp;
1833 } 1725 }
@@ -1850,7 +1742,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1850 erp = previous_erp; 1742 erp = previous_erp;
1851 1743
1852 /* update the LO with the new returned sense data */ 1744 /* update the LO with the new returned sense data */
1853 LO_data = erp->data + sizeof (struct DE_eckd_data); 1745 LO_data = erp->data + sizeof(struct DE_eckd_data);
1854 1746
1855 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) { 1747 if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
1856 1748
@@ -1889,7 +1781,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
1889 ccw++; /* addr of TIC ccw */ 1781 ccw++; /* addr of TIC ccw */
1890 ccw->cda = cpa; 1782 ccw->cda = cpa;
1891 1783
1892 erp->status = DASD_CQR_QUEUED; 1784 erp->status = DASD_CQR_FILLED;
1893 1785
1894 return erp; 1786 return erp;
1895 1787
@@ -1968,9 +1860,7 @@ dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
1968 * try further actions. */ 1860 * try further actions. */
1969 1861
1970 erp->lpm = 0; 1862 erp->lpm = 0;
1971 1863 erp->status = DASD_CQR_NEED_ERP;
1972 erp->status = DASD_CQR_ERROR;
1973
1974 } 1864 }
1975 } 1865 }
1976 1866
@@ -2047,7 +1937,7 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
2047 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) { 1937 if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
2048 1938
2049 /* set to suspended duplex state then restart */ 1939 /* set to suspended duplex state then restart */
2050 struct dasd_device *device = erp->device; 1940 struct dasd_device *device = erp->startdev;
2051 1941
2052 DEV_MESSAGE(KERN_ERR, device, "%s", 1942 DEV_MESSAGE(KERN_ERR, device, "%s",
2053 "Set device to suspended duplex state should be " 1943 "Set device to suspended duplex state should be "
@@ -2081,28 +1971,26 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
2081{ 1971{
2082 1972
2083 if ((erp->function == dasd_3990_erp_compound_retry) && 1973 if ((erp->function == dasd_3990_erp_compound_retry) &&
2084 (erp->status == DASD_CQR_ERROR)) { 1974 (erp->status == DASD_CQR_NEED_ERP)) {
2085 1975
2086 dasd_3990_erp_compound_path(erp, sense); 1976 dasd_3990_erp_compound_path(erp, sense);
2087 } 1977 }
2088 1978
2089 if ((erp->function == dasd_3990_erp_compound_path) && 1979 if ((erp->function == dasd_3990_erp_compound_path) &&
2090 (erp->status == DASD_CQR_ERROR)) { 1980 (erp->status == DASD_CQR_NEED_ERP)) {
2091 1981
2092 erp = dasd_3990_erp_compound_code(erp, sense); 1982 erp = dasd_3990_erp_compound_code(erp, sense);
2093 } 1983 }
2094 1984
2095 if ((erp->function == dasd_3990_erp_compound_code) && 1985 if ((erp->function == dasd_3990_erp_compound_code) &&
2096 (erp->status == DASD_CQR_ERROR)) { 1986 (erp->status == DASD_CQR_NEED_ERP)) {
2097 1987
2098 dasd_3990_erp_compound_config(erp, sense); 1988 dasd_3990_erp_compound_config(erp, sense);
2099 } 1989 }
2100 1990
2101 /* if no compound action ERP specified, the request failed */ 1991 /* if no compound action ERP specified, the request failed */
2102 if (erp->status == DASD_CQR_ERROR) { 1992 if (erp->status == DASD_CQR_NEED_ERP)
2103
2104 erp->status = DASD_CQR_FAILED; 1993 erp->status = DASD_CQR_FAILED;
2105 }
2106 1994
2107 return erp; 1995 return erp;
2108 1996
@@ -2127,7 +2015,7 @@ static struct dasd_ccw_req *
2127dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense) 2015dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2128{ 2016{
2129 2017
2130 struct dasd_device *device = erp->device; 2018 struct dasd_device *device = erp->startdev;
2131 2019
2132 erp->function = dasd_3990_erp_inspect_32; 2020 erp->function = dasd_3990_erp_inspect_32;
2133 2021
@@ -2149,8 +2037,7 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
2149 2037
2150 case 0x01: /* fatal error */ 2038 case 0x01: /* fatal error */
2151 DEV_MESSAGE(KERN_ERR, device, "%s", 2039 DEV_MESSAGE(KERN_ERR, device, "%s",
2152 "Fatal error should have been " 2040 "Retry not recommended - Fatal error");
2153 "handled within the interrupt handler");
2154 2041
2155 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); 2042 erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
2156 break; 2043 break;
@@ -2253,6 +2140,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
2253 /* already set up new ERP ! */ 2140 /* already set up new ERP ! */
2254 char *sense = erp->refers->irb.ecw; 2141 char *sense = erp->refers->irb.ecw;
2255 2142
2143 /* if this problem occured on an alias retry on base */
2144 erp_new = dasd_3990_erp_inspect_alias(erp);
2145 if (erp_new)
2146 return erp_new;
2147
2256 /* distinguish between 24 and 32 byte sense data */ 2148 /* distinguish between 24 and 32 byte sense data */
2257 if (sense[27] & DASD_SENSE_BIT_0) { 2149 if (sense[27] & DASD_SENSE_BIT_0) {
2258 2150
@@ -2287,13 +2179,13 @@ static struct dasd_ccw_req *
2287dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr) 2179dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2288{ 2180{
2289 2181
2290 struct dasd_device *device = cqr->device; 2182 struct dasd_device *device = cqr->startdev;
2291 struct ccw1 *ccw; 2183 struct ccw1 *ccw;
2292 2184
2293 /* allocate additional request block */ 2185 /* allocate additional request block */
2294 struct dasd_ccw_req *erp; 2186 struct dasd_ccw_req *erp;
2295 2187
2296 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, cqr->device); 2188 erp = dasd_alloc_erp_request((char *) &cqr->magic, 2, 0, device);
2297 if (IS_ERR(erp)) { 2189 if (IS_ERR(erp)) {
2298 if (cqr->retries <= 0) { 2190 if (cqr->retries <= 0) {
2299 DEV_MESSAGE(KERN_ERR, device, "%s", 2191 DEV_MESSAGE(KERN_ERR, device, "%s",
@@ -2305,7 +2197,7 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2305 "Unable to allocate ERP request " 2197 "Unable to allocate ERP request "
2306 "(%i retries left)", 2198 "(%i retries left)",
2307 cqr->retries); 2199 cqr->retries);
2308 dasd_set_timer(device, (HZ << 3)); 2200 dasd_block_set_timer(device->block, (HZ << 3));
2309 } 2201 }
2310 return cqr; 2202 return cqr;
2311 } 2203 }
@@ -2319,7 +2211,9 @@ dasd_3990_erp_add_erp(struct dasd_ccw_req * cqr)
2319 ccw->cda = (long)(cqr->cpaddr); 2211 ccw->cda = (long)(cqr->cpaddr);
2320 erp->function = dasd_3990_erp_add_erp; 2212 erp->function = dasd_3990_erp_add_erp;
2321 erp->refers = cqr; 2213 erp->refers = cqr;
2322 erp->device = cqr->device; 2214 erp->startdev = device;
2215 erp->memdev = device;
2216 erp->block = cqr->block;
2323 erp->magic = cqr->magic; 2217 erp->magic = cqr->magic;
2324 erp->expires = 0; 2218 erp->expires = 0;
2325 erp->retries = 256; 2219 erp->retries = 256;
@@ -2466,7 +2360,7 @@ static struct dasd_ccw_req *
2466dasd_3990_erp_further_erp(struct dasd_ccw_req *erp) 2360dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
2467{ 2361{
2468 2362
2469 struct dasd_device *device = erp->device; 2363 struct dasd_device *device = erp->startdev;
2470 char *sense = erp->irb.ecw; 2364 char *sense = erp->irb.ecw;
2471 2365
2472 /* check for 24 byte sense ERP */ 2366 /* check for 24 byte sense ERP */
@@ -2557,7 +2451,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2557 struct dasd_ccw_req *erp) 2451 struct dasd_ccw_req *erp)
2558{ 2452{
2559 2453
2560 struct dasd_device *device = erp_head->device; 2454 struct dasd_device *device = erp_head->startdev;
2561 struct dasd_ccw_req *erp_done = erp_head; /* finished req */ 2455 struct dasd_ccw_req *erp_done = erp_head; /* finished req */
2562 struct dasd_ccw_req *erp_free = NULL; /* req to be freed */ 2456 struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
2563 2457
@@ -2569,13 +2463,13 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2569 "original request was lost\n"); 2463 "original request was lost\n");
2570 2464
2571 /* remove the request from the device queue */ 2465 /* remove the request from the device queue */
2572 list_del(&erp_done->list); 2466 list_del(&erp_done->blocklist);
2573 2467
2574 erp_free = erp_done; 2468 erp_free = erp_done;
2575 erp_done = erp_done->refers; 2469 erp_done = erp_done->refers;
2576 2470
2577 /* free the finished erp request */ 2471 /* free the finished erp request */
2578 dasd_free_erp_request(erp_free, erp_free->device); 2472 dasd_free_erp_request(erp_free, erp_free->memdev);
2579 2473
2580 } /* end while */ 2474 } /* end while */
2581 2475
@@ -2603,7 +2497,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2603 erp->retries, erp); 2497 erp->retries, erp);
2604 2498
2605 /* handle the request again... */ 2499 /* handle the request again... */
2606 erp->status = DASD_CQR_QUEUED; 2500 erp->status = DASD_CQR_FILLED;
2607 } 2501 }
2608 2502
2609 } else { 2503 } else {
@@ -2636,9 +2530,8 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
2636struct dasd_ccw_req * 2530struct dasd_ccw_req *
2637dasd_3990_erp_action(struct dasd_ccw_req * cqr) 2531dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2638{ 2532{
2639
2640 struct dasd_ccw_req *erp = NULL; 2533 struct dasd_ccw_req *erp = NULL;
2641 struct dasd_device *device = cqr->device; 2534 struct dasd_device *device = cqr->startdev;
2642 struct dasd_ccw_req *temp_erp = NULL; 2535 struct dasd_ccw_req *temp_erp = NULL;
2643 2536
2644 if (device->features & DASD_FEATURE_ERPLOG) { 2537 if (device->features & DASD_FEATURE_ERPLOG) {
@@ -2704,10 +2597,11 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
2704 } 2597 }
2705 } 2598 }
2706 2599
2707 /* enqueue added ERP request */ 2600 /* enqueue ERP request if it's a new one */
2708 if (erp->status == DASD_CQR_FILLED) { 2601 if (list_empty(&erp->blocklist)) {
2709 erp->status = DASD_CQR_QUEUED; 2602 cqr->status = DASD_CQR_IN_ERP;
2710 list_add(&erp->list, &device->ccw_queue); 2603 /* add erp request before the cqr */
2604 list_add_tail(&erp->blocklist, &cqr->blocklist);
2711 } 2605 }
2712 2606
2713 return erp; 2607 return erp;
diff --git a/drivers/s390/block/dasd_9336_erp.c b/drivers/s390/block/dasd_9336_erp.c
deleted file mode 100644
index 6e082688475a..000000000000
--- a/drivers/s390/block/dasd_9336_erp.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9336_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(9336)"
10
11#include "dasd_int.h"
12
13
14/*
15 * DASD_9336_ERP_EXAMINE
16 *
17 * DESCRIPTION
18 * Checks only for fatal/no/recover error.
19 * A detailed examination of the sense data is done later outside
20 * the interrupt handler.
21 *
22 * The logic is based on the 'IBM 3880 Storage Control Reference' manual
23 * 'Chapter 7. 9336 Sense Data'.
24 *
25 * RETURN VALUES
26 * dasd_era_none no error
27 * dasd_era_fatal for all fatal (unrecoverable errors)
28 * dasd_era_recover for all others.
29 */
30dasd_era_t
31dasd_9336_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
32{
33 /* check for successful execution first */
34 if (irb->scsw.cstat == 0x00 &&
35 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
36 return dasd_era_none;
37
38 /* examine the 24 byte sense data */
39 return dasd_era_recover;
40
41} /* END dasd_9336_erp_examine */
diff --git a/drivers/s390/block/dasd_9343_erp.c b/drivers/s390/block/dasd_9343_erp.c
deleted file mode 100644
index ddecb9808ed4..000000000000
--- a/drivers/s390/block/dasd_9343_erp.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * File...........: linux/drivers/s390/block/dasd_9345_erp.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Bugreports.to..: <Linux390@de.ibm.com>
5 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
6 *
7 */
8
9#define PRINTK_HEADER "dasd_erp(9343)"
10
11#include "dasd_int.h"
12
13dasd_era_t
14dasd_9343_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb)
15{
16 if (irb->scsw.cstat == 0x00 &&
17 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
18 return dasd_era_none;
19
20 return dasd_era_recover;
21}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
new file mode 100644
index 000000000000..3a40bee9d358
--- /dev/null
+++ b/drivers/s390/block/dasd_alias.c
@@ -0,0 +1,903 @@
1/*
2 * PAV alias management for the DASD ECKD discipline
3 *
4 * Copyright IBM Corporation, 2007
5 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6 */
7
8#include <linux/list.h>
9#include <asm/ebcdic.h>
10#include "dasd_int.h"
11#include "dasd_eckd.h"
12
13#ifdef PRINTK_HEADER
14#undef PRINTK_HEADER
15#endif /* PRINTK_HEADER */
16#define PRINTK_HEADER "dasd(eckd):"
17
18
19/*
20 * General concept of alias management:
21 * - PAV and DASD alias management is specific to the eckd discipline.
22 * - A device is connected to an lcu as long as the device exists.
23 * dasd_alias_make_device_known_to_lcu will be called wenn the
24 * device is checked by the eckd discipline and
25 * dasd_alias_disconnect_device_from_lcu will be called
26 * before the device is deleted.
27 * - The dasd_alias_add_device / dasd_alias_remove_device
28 * functions mark the point when a device is 'ready for service'.
29 * - A summary unit check is a rare occasion, but it is mandatory to
30 * support it. It requires some complex recovery actions before the
31 * devices can be used again (see dasd_alias_handle_summary_unit_check).
32 * - dasd_alias_get_start_dev will find an alias device that can be used
33 * instead of the base device and does some (very simple) load balancing.
34 * This is the function that gets called for each I/O, so when improving
35 * something, this function should get faster or better, the rest has just
36 * to be correct.
37 */
38
39
40static void summary_unit_check_handling_work(struct work_struct *);
41static void lcu_update_work(struct work_struct *);
42static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
43
44static struct alias_root aliastree = {
45 .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
46 .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
47};
48
49static struct alias_server *_find_server(struct dasd_uid *uid)
50{
51 struct alias_server *pos;
52 list_for_each_entry(pos, &aliastree.serverlist, server) {
53 if (!strncmp(pos->uid.vendor, uid->vendor,
54 sizeof(uid->vendor))
55 && !strncmp(pos->uid.serial, uid->serial,
56 sizeof(uid->serial)))
57 return pos;
58 };
59 return NULL;
60}
61
62static struct alias_lcu *_find_lcu(struct alias_server *server,
63 struct dasd_uid *uid)
64{
65 struct alias_lcu *pos;
66 list_for_each_entry(pos, &server->lculist, lcu) {
67 if (pos->uid.ssid == uid->ssid)
68 return pos;
69 };
70 return NULL;
71}
72
73static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
74 struct dasd_uid *uid)
75{
76 struct alias_pav_group *pos;
77 __u8 search_unit_addr;
78
79 /* for hyper pav there is only one group */
80 if (lcu->pav == HYPER_PAV) {
81 if (list_empty(&lcu->grouplist))
82 return NULL;
83 else
84 return list_first_entry(&lcu->grouplist,
85 struct alias_pav_group, group);
86 }
87
88 /* for base pav we have to find the group that matches the base */
89 if (uid->type == UA_BASE_DEVICE)
90 search_unit_addr = uid->real_unit_addr;
91 else
92 search_unit_addr = uid->base_unit_addr;
93 list_for_each_entry(pos, &lcu->grouplist, group) {
94 if (pos->uid.base_unit_addr == search_unit_addr)
95 return pos;
96 };
97 return NULL;
98}
99
100static struct alias_server *_allocate_server(struct dasd_uid *uid)
101{
102 struct alias_server *server;
103
104 server = kzalloc(sizeof(*server), GFP_KERNEL);
105 if (!server)
106 return ERR_PTR(-ENOMEM);
107 memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
108 memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
109 INIT_LIST_HEAD(&server->server);
110 INIT_LIST_HEAD(&server->lculist);
111 return server;
112}
113
114static void _free_server(struct alias_server *server)
115{
116 kfree(server);
117}
118
119static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
120{
121 struct alias_lcu *lcu;
122
123 lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
124 if (!lcu)
125 return ERR_PTR(-ENOMEM);
126 lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
127 if (!lcu->uac)
128 goto out_err1;
129 lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
130 if (!lcu->rsu_cqr)
131 goto out_err2;
132 lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
133 GFP_KERNEL | GFP_DMA);
134 if (!lcu->rsu_cqr->cpaddr)
135 goto out_err3;
136 lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
137 if (!lcu->rsu_cqr->data)
138 goto out_err4;
139
140 memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
141 memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
142 lcu->uid.ssid = uid->ssid;
143 lcu->pav = NO_PAV;
144 lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
145 INIT_LIST_HEAD(&lcu->lcu);
146 INIT_LIST_HEAD(&lcu->inactive_devices);
147 INIT_LIST_HEAD(&lcu->active_devices);
148 INIT_LIST_HEAD(&lcu->grouplist);
149 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
150 INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
151 spin_lock_init(&lcu->lock);
152 return lcu;
153
154out_err4:
155 kfree(lcu->rsu_cqr->cpaddr);
156out_err3:
157 kfree(lcu->rsu_cqr);
158out_err2:
159 kfree(lcu->uac);
160out_err1:
161 kfree(lcu);
162 return ERR_PTR(-ENOMEM);
163}
164
165static void _free_lcu(struct alias_lcu *lcu)
166{
167 kfree(lcu->rsu_cqr->data);
168 kfree(lcu->rsu_cqr->cpaddr);
169 kfree(lcu->rsu_cqr);
170 kfree(lcu->uac);
171 kfree(lcu);
172}
173
174/*
175 * This is the function that will allocate all the server and lcu data,
176 * so this function must be called first for a new device.
177 * If the return value is 1, the lcu was already known before, if it
178 * is 0, this is a new lcu.
179 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
180 */
181int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
182{
183 struct dasd_eckd_private *private;
184 unsigned long flags;
185 struct alias_server *server, *newserver;
186 struct alias_lcu *lcu, *newlcu;
187 int is_lcu_known;
188 struct dasd_uid *uid;
189
190 private = (struct dasd_eckd_private *) device->private;
191 uid = &private->uid;
192 spin_lock_irqsave(&aliastree.lock, flags);
193 is_lcu_known = 1;
194 server = _find_server(uid);
195 if (!server) {
196 spin_unlock_irqrestore(&aliastree.lock, flags);
197 newserver = _allocate_server(uid);
198 if (IS_ERR(newserver))
199 return PTR_ERR(newserver);
200 spin_lock_irqsave(&aliastree.lock, flags);
201 server = _find_server(uid);
202 if (!server) {
203 list_add(&newserver->server, &aliastree.serverlist);
204 server = newserver;
205 is_lcu_known = 0;
206 } else {
207 /* someone was faster */
208 _free_server(newserver);
209 }
210 }
211
212 lcu = _find_lcu(server, uid);
213 if (!lcu) {
214 spin_unlock_irqrestore(&aliastree.lock, flags);
215 newlcu = _allocate_lcu(uid);
216 if (IS_ERR(newlcu))
217 return PTR_ERR(lcu);
218 spin_lock_irqsave(&aliastree.lock, flags);
219 lcu = _find_lcu(server, uid);
220 if (!lcu) {
221 list_add(&newlcu->lcu, &server->lculist);
222 lcu = newlcu;
223 is_lcu_known = 0;
224 } else {
225 /* someone was faster */
226 _free_lcu(newlcu);
227 }
228 is_lcu_known = 0;
229 }
230 spin_lock(&lcu->lock);
231 list_add(&device->alias_list, &lcu->inactive_devices);
232 private->lcu = lcu;
233 spin_unlock(&lcu->lock);
234 spin_unlock_irqrestore(&aliastree.lock, flags);
235
236 return is_lcu_known;
237}
238
239/*
240 * This function removes a device from the scope of alias management.
241 * The complicated part is to make sure that it is not in use by
242 * any of the workers. If necessary cancel the work.
243 */
244void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
245{
246 struct dasd_eckd_private *private;
247 unsigned long flags;
248 struct alias_lcu *lcu;
249 struct alias_server *server;
250 int was_pending;
251
252 private = (struct dasd_eckd_private *) device->private;
253 lcu = private->lcu;
254 spin_lock_irqsave(&lcu->lock, flags);
255 list_del_init(&device->alias_list);
256 /* make sure that the workers don't use this device */
257 if (device == lcu->suc_data.device) {
258 spin_unlock_irqrestore(&lcu->lock, flags);
259 cancel_work_sync(&lcu->suc_data.worker);
260 spin_lock_irqsave(&lcu->lock, flags);
261 if (device == lcu->suc_data.device)
262 lcu->suc_data.device = NULL;
263 }
264 was_pending = 0;
265 if (device == lcu->ruac_data.device) {
266 spin_unlock_irqrestore(&lcu->lock, flags);
267 was_pending = 1;
268 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
269 spin_lock_irqsave(&lcu->lock, flags);
270 if (device == lcu->ruac_data.device)
271 lcu->ruac_data.device = NULL;
272 }
273 private->lcu = NULL;
274 spin_unlock_irqrestore(&lcu->lock, flags);
275
276 spin_lock_irqsave(&aliastree.lock, flags);
277 spin_lock(&lcu->lock);
278 if (list_empty(&lcu->grouplist) &&
279 list_empty(&lcu->active_devices) &&
280 list_empty(&lcu->inactive_devices)) {
281 list_del(&lcu->lcu);
282 spin_unlock(&lcu->lock);
283 _free_lcu(lcu);
284 lcu = NULL;
285 } else {
286 if (was_pending)
287 _schedule_lcu_update(lcu, NULL);
288 spin_unlock(&lcu->lock);
289 }
290 server = _find_server(&private->uid);
291 if (server && list_empty(&server->lculist)) {
292 list_del(&server->server);
293 _free_server(server);
294 }
295 spin_unlock_irqrestore(&aliastree.lock, flags);
296}
297
298/*
299 * This function assumes that the unit address configuration stored
300 * in the lcu is up to date and will update the device uid before
301 * adding it to a pav group.
302 */
303static int _add_device_to_lcu(struct alias_lcu *lcu,
304 struct dasd_device *device)
305{
306
307 struct dasd_eckd_private *private;
308 struct alias_pav_group *group;
309 struct dasd_uid *uid;
310
311 private = (struct dasd_eckd_private *) device->private;
312 uid = &private->uid;
313 uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
314 uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
315 dasd_set_uid(device->cdev, &private->uid);
316
317 /* if we have no PAV anyway, we don't need to bother with PAV groups */
318 if (lcu->pav == NO_PAV) {
319 list_move(&device->alias_list, &lcu->active_devices);
320 return 0;
321 }
322
323 group = _find_group(lcu, uid);
324 if (!group) {
325 group = kzalloc(sizeof(*group), GFP_ATOMIC);
326 if (!group)
327 return -ENOMEM;
328 memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
329 memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
330 group->uid.ssid = uid->ssid;
331 if (uid->type == UA_BASE_DEVICE)
332 group->uid.base_unit_addr = uid->real_unit_addr;
333 else
334 group->uid.base_unit_addr = uid->base_unit_addr;
335 INIT_LIST_HEAD(&group->group);
336 INIT_LIST_HEAD(&group->baselist);
337 INIT_LIST_HEAD(&group->aliaslist);
338 list_add(&group->group, &lcu->grouplist);
339 }
340 if (uid->type == UA_BASE_DEVICE)
341 list_move(&device->alias_list, &group->baselist);
342 else
343 list_move(&device->alias_list, &group->aliaslist);
344 private->pavgroup = group;
345 return 0;
346};
347
348static void _remove_device_from_lcu(struct alias_lcu *lcu,
349 struct dasd_device *device)
350{
351 struct dasd_eckd_private *private;
352 struct alias_pav_group *group;
353
354 private = (struct dasd_eckd_private *) device->private;
355 list_move(&device->alias_list, &lcu->inactive_devices);
356 group = private->pavgroup;
357 if (!group)
358 return;
359 private->pavgroup = NULL;
360 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
361 list_del(&group->group);
362 kfree(group);
363 return;
364 }
365 if (group->next == device)
366 group->next = NULL;
367};
368
/*
 * Issue a Perform Subsystem Function / Read Subsystem Data request
 * (suborder 0x0e, "Read unit address configuration") on @device and
 * store the result in lcu->uac.
 * Returns 0 on success or a negative error code; on failure the
 * NEED_UAC_UPDATE flag is set again so the update will be retried.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	/* channel program: one PSF CCW followed by one RSSD CCW */
	cqr = dasd_kmalloc_request("ECKD",
				   1 /* PSF */ + 1 /* RSSD */ ,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		/* re-arm the update so a later attempt can pick it up */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}
430
/*
 * Rebuild the alias configuration of an LCU: dissolve all PAV groups,
 * re-read the unit address configuration through @refdev, re-derive the
 * LCU's PAV mode and sort all devices back into groups.
 * Sleeps for the I/O, so lcu->lock must not be held by the caller.
 * Returns 0 on success or the error from the read request.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* move every device out of its group and delete all groups */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* sleeping I/O; must run without lcu->lock held */
	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/* derive the PAV mode from the first alias-type unit address found */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	/* sort the collected devices back into their (possibly new) groups */
	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
484
/*
 * Delayed-work handler that performs the actual LCU update.
 * If the update failed, or new update requests arrived while it was
 * running, it reschedules itself with a 30 second delay; otherwise it
 * clears UPDATE_PENDING and releases ruac_data.device.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DEV_MESSAGE(KERN_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
513
/*
 * Mark the LCU as needing a UAC update and schedule lcu_update_work.
 * Called with lcu->lock held (see dasd_alias_add_device and
 * summary_unit_check_handling_work).
 * The device used to issue the update I/O is chosen in this order:
 * @device itself (if still on an alias list), any base or alias device
 * of the first PAV group, any active device.  Returns -EINVAL if no
 * usable device was found; the next device set active will then
 * trigger the update.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}
554
/*
 * Add @device to the alias handling of its LCU.  If no configuration
 * update is pending, try to sort it into a PAV group directly; if that
 * fails (or an update is already pending) park it on the active list
 * and schedule an LCU update instead.
 * Returns 0 or the error code from _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
578
579int dasd_alias_remove_device(struct dasd_device *device)
580{
581 struct dasd_eckd_private *private;
582 struct alias_lcu *lcu;
583 unsigned long flags;
584
585 private = (struct dasd_eckd_private *) device->private;
586 lcu = private->lcu;
587 spin_lock_irqsave(&lcu->lock, flags);
588 _remove_device_from_lcu(lcu, device);
589 spin_unlock_irqrestore(&lcu->lock, flags);
590 return 0;
591}
592
/*
 * Pick an alias device from the PAV group of @base_device to start the
 * next request on, round-robin over the group's alias list by means of
 * group->next.  Returns NULL when no alias should be used: no group or
 * LCU, no PAV, a configuration update is pending, the alias carries at
 * least as many requests as the base, or the alias is stopped.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{

	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/*
	 * Advance the round-robin pointer; list_first_entry() on a member
	 * node yields the next element, wrapping at the end of the list.
	 */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	/* only use the alias if it is less loaded than the base device */
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
636
/*
 * Summary unit check handling depends on the way alias devices
 * are handled, so it is done here rather than in dasd_eckd.c.
 */
/*
 * Issue a Reset Summary Unit Check (RSCK) CCW on @device, with the
 * summary unit check reason code in the first data byte.  Uses the
 * LCU's dedicated rsu_cqr request, so no memory is allocated here.
 * Returns the result of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RSCK;
	cqr->cpaddr->flags = 0 ;
	cqr->cpaddr->count = 16;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
669
/*
 * Trigger the block and device bottom halves of every base device on
 * the LCU so that queued requests are started again; alias devices on
 * the active/inactive lists are skipped.
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* the baselist of a group contains base devices only */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
698
/*
 * Flush the request queues of all alias devices of the LCU and move
 * them to the active_devices list.  The flush is best effort: the
 * return code of dasd_flush_device_queue is ignored.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	int rc;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* drop the lock while the flush may sleep */
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
747
748/*
749 * This function is called in interrupt context, so the
750 * cdev lock for device is already locked!
751 */
/*
 * Set DASD_STOPPED_SU on every device of the LCU.  The cdev lock of
 * @device is already held by the interrupt handler (see the comment
 * above), so the lock is only taken for the other devices.
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list) {
		if (pos != device)
			spin_lock(get_ccwdev_lock(pos->cdev));
		pos->stopped |= DASD_STOPPED_SU;
		if (pos != device)
			spin_unlock(get_ccwdev_lock(pos->cdev));
	}
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
		if (pos != device)
			spin_lock(get_ccwdev_lock(pos->cdev));
		pos->stopped |= DASD_STOPPED_SU;
		if (pos != device)
			spin_unlock(get_ccwdev_lock(pos->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list) {
			if (pos != device)
				spin_lock(get_ccwdev_lock(pos->cdev));
			pos->stopped |= DASD_STOPPED_SU;
			if (pos != device)
				spin_unlock(get_ccwdev_lock(pos->cdev));
		}
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
			if (pos != device)
				spin_lock(get_ccwdev_lock(pos->cdev));
			pos->stopped |= DASD_STOPPED_SU;
			if (pos != device)
				spin_unlock(get_ccwdev_lock(pos->cdev));
		}
	}
}
789
/*
 * Clear DASD_STOPPED_SU on every device of the LCU, taking each
 * device's cdev lock around the flag update.
 */
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	unsigned long flags;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		device->stopped &= ~DASD_STOPPED_SU;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			device->stopped &= ~DASD_STOPPED_SU;
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
	}
}
823
/*
 * Worker that recovers an LCU after a summary unit check:
 * flush all alias devices, reset the summary unit check on the
 * reporting device, unstop and restart all devices, and finally
 * schedule a fresh read of the alias configuration.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	/* allow the next summary unit check to be scheduled */
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}
853
854/*
855 * note: this will be called from int handler context (cdev locked)
856 */
857void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
858 struct irb *irb)
859{
860 struct alias_lcu *lcu;
861 char reason;
862 struct dasd_eckd_private *private;
863
864 private = (struct dasd_eckd_private *) device->private;
865
866 reason = irb->ecw[8];
867 DEV_MESSAGE(KERN_WARNING, device, "%s %x",
868 "eckd handle summary unit check: reason", reason);
869
870 lcu = private->lcu;
871 if (!lcu) {
872 DEV_MESSAGE(KERN_WARNING, device, "%s",
873 "device not ready to handle summary"
874 " unit check (no lcu structure)");
875 return;
876 }
877 spin_lock(&lcu->lock);
878 _stop_all_devices_on_lcu(lcu, device);
879 /* prepare for lcu_update */
880 private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
881 /* If this device is about to be removed just return and wait for
882 * the next interrupt on a different device
883 */
884 if (list_empty(&device->alias_list)) {
885 DEV_MESSAGE(KERN_WARNING, device, "%s",
886 "device is in offline processing,"
887 " don't do summary unit check handling");
888 spin_unlock(&lcu->lock);
889 return;
890 }
891 if (lcu->suc_data.device) {
892 /* already scheduled or running */
893 DEV_MESSAGE(KERN_WARNING, device, "%s",
894 "previous instance of summary unit check worker"
895 " still pending");
896 spin_unlock(&lcu->lock);
897 return ;
898 }
899 lcu->suc_data.reason = reason;
900 lcu->suc_data.device = device;
901 spin_unlock(&lcu->lock);
902 schedule_work(&lcu->suc_data.worker);
903};
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 0c67258fb9ec..f4fb40257348 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -49,22 +49,6 @@ struct dasd_devmap {
49}; 49};
50 50
51/* 51/*
52 * dasd_server_ssid_map contains a globally unique storage server subsystem ID.
53 * dasd_server_ssid_list contains the list of all subsystem IDs accessed by
54 * the DASD device driver.
55 */
56struct dasd_server_ssid_map {
57 struct list_head list;
58 struct system_id {
59 char vendor[4];
60 char serial[15];
61 __u16 ssid;
62 } sid;
63};
64
65static struct list_head dasd_server_ssid_list;
66
67/*
68 * Parameter parsing functions for dasd= parameter. The syntax is: 52 * Parameter parsing functions for dasd= parameter. The syntax is:
69 * <devno> : (0x)?[0-9a-fA-F]+ 53 * <devno> : (0x)?[0-9a-fA-F]+
70 * <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+ 54 * <busid> : [0-0a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
@@ -721,8 +705,9 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
721 devmap->features &= ~DASD_FEATURE_READONLY; 705 devmap->features &= ~DASD_FEATURE_READONLY;
722 if (devmap->device) 706 if (devmap->device)
723 devmap->device->features = devmap->features; 707 devmap->device->features = devmap->features;
724 if (devmap->device && devmap->device->gdp) 708 if (devmap->device && devmap->device->block
725 set_disk_ro(devmap->device->gdp, val); 709 && devmap->device->block->gdp)
710 set_disk_ro(devmap->device->block->gdp, val);
726 spin_unlock(&dasd_devmap_lock); 711 spin_unlock(&dasd_devmap_lock);
727 return count; 712 return count;
728} 713}
@@ -893,12 +878,16 @@ dasd_alias_show(struct device *dev, struct device_attribute *attr, char *buf)
893 878
894 devmap = dasd_find_busid(dev->bus_id); 879 devmap = dasd_find_busid(dev->bus_id);
895 spin_lock(&dasd_devmap_lock); 880 spin_lock(&dasd_devmap_lock);
896 if (!IS_ERR(devmap)) 881 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
897 alias = devmap->uid.alias; 882 spin_unlock(&dasd_devmap_lock);
883 return sprintf(buf, "0\n");
884 }
885 if (devmap->uid.type == UA_BASE_PAV_ALIAS ||
886 devmap->uid.type == UA_HYPER_PAV_ALIAS)
887 alias = 1;
898 else 888 else
899 alias = 0; 889 alias = 0;
900 spin_unlock(&dasd_devmap_lock); 890 spin_unlock(&dasd_devmap_lock);
901
902 return sprintf(buf, alias ? "1\n" : "0\n"); 891 return sprintf(buf, alias ? "1\n" : "0\n");
903} 892}
904 893
@@ -930,19 +919,36 @@ static ssize_t
930dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf) 919dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
931{ 920{
932 struct dasd_devmap *devmap; 921 struct dasd_devmap *devmap;
933 char uid[UID_STRLEN]; 922 char uid_string[UID_STRLEN];
923 char ua_string[3];
924 struct dasd_uid *uid;
934 925
935 devmap = dasd_find_busid(dev->bus_id); 926 devmap = dasd_find_busid(dev->bus_id);
936 spin_lock(&dasd_devmap_lock); 927 spin_lock(&dasd_devmap_lock);
937 if (!IS_ERR(devmap) && strlen(devmap->uid.vendor) > 0) 928 if (IS_ERR(devmap) || strlen(devmap->uid.vendor) == 0) {
938 snprintf(uid, sizeof(uid), "%s.%s.%04x.%02x", 929 spin_unlock(&dasd_devmap_lock);
939 devmap->uid.vendor, devmap->uid.serial, 930 return sprintf(buf, "\n");
940 devmap->uid.ssid, devmap->uid.unit_addr); 931 }
941 else 932 uid = &devmap->uid;
942 uid[0] = 0; 933 switch (uid->type) {
934 case UA_BASE_DEVICE:
935 sprintf(ua_string, "%02x", uid->real_unit_addr);
936 break;
937 case UA_BASE_PAV_ALIAS:
938 sprintf(ua_string, "%02x", uid->base_unit_addr);
939 break;
940 case UA_HYPER_PAV_ALIAS:
941 sprintf(ua_string, "xx");
942 break;
943 default:
944 /* should not happen, treat like base device */
945 sprintf(ua_string, "%02x", uid->real_unit_addr);
946 break;
947 }
948 snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s",
949 uid->vendor, uid->serial, uid->ssid, ua_string);
943 spin_unlock(&dasd_devmap_lock); 950 spin_unlock(&dasd_devmap_lock);
944 951 return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
945 return snprintf(buf, PAGE_SIZE, "%s\n", uid);
946} 952}
947 953
948static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL); 954static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
@@ -1040,39 +1046,16 @@ int
1040dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) 1046dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
1041{ 1047{
1042 struct dasd_devmap *devmap; 1048 struct dasd_devmap *devmap;
1043 struct dasd_server_ssid_map *srv, *tmp;
1044 1049
1045 devmap = dasd_find_busid(cdev->dev.bus_id); 1050 devmap = dasd_find_busid(cdev->dev.bus_id);
1046 if (IS_ERR(devmap)) 1051 if (IS_ERR(devmap))
1047 return PTR_ERR(devmap); 1052 return PTR_ERR(devmap);
1048 1053
1049 /* generate entry for server_ssid_map */
1050 srv = (struct dasd_server_ssid_map *)
1051 kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL);
1052 if (!srv)
1053 return -ENOMEM;
1054 strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
1055 strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
1056 srv->sid.ssid = uid->ssid;
1057
1058 /* server is already contained ? */
1059 spin_lock(&dasd_devmap_lock); 1054 spin_lock(&dasd_devmap_lock);
1060 devmap->uid = *uid; 1055 devmap->uid = *uid;
1061 list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
1062 if (!memcmp(&srv->sid, &tmp->sid,
1063 sizeof(struct system_id))) {
1064 kfree(srv);
1065 srv = NULL;
1066 break;
1067 }
1068 }
1069
1070 /* add servermap to serverlist */
1071 if (srv)
1072 list_add(&srv->list, &dasd_server_ssid_list);
1073 spin_unlock(&dasd_devmap_lock); 1056 spin_unlock(&dasd_devmap_lock);
1074 1057
1075 return (srv ? 1 : 0); 1058 return 0;
1076} 1059}
1077EXPORT_SYMBOL_GPL(dasd_set_uid); 1060EXPORT_SYMBOL_GPL(dasd_set_uid);
1078 1061
@@ -1138,9 +1121,6 @@ dasd_devmap_init(void)
1138 dasd_max_devindex = 0; 1121 dasd_max_devindex = 0;
1139 for (i = 0; i < 256; i++) 1122 for (i = 0; i < 256; i++)
1140 INIT_LIST_HEAD(&dasd_hashlists[i]); 1123 INIT_LIST_HEAD(&dasd_hashlists[i]);
1141
1142 /* Initialize servermap structure. */
1143 INIT_LIST_HEAD(&dasd_server_ssid_list);
1144 return 0; 1124 return 0;
1145} 1125}
1146 1126
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 571320ab9e1a..d91df38ee4f7 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -142,7 +142,7 @@ dasd_diag_erp(struct dasd_device *device)
142 int rc; 142 int rc;
143 143
144 mdsk_term_io(device); 144 mdsk_term_io(device);
145 rc = mdsk_init_io(device, device->bp_block, 0, NULL); 145 rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
146 if (rc) 146 if (rc)
147 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, " 147 DEV_MESSAGE(KERN_WARNING, device, "DIAG ERP unsuccessful, "
148 "rc=%d", rc); 148 "rc=%d", rc);
@@ -158,11 +158,11 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
158 struct dasd_diag_req *dreq; 158 struct dasd_diag_req *dreq;
159 int rc; 159 int rc;
160 160
161 device = cqr->device; 161 device = cqr->startdev;
162 if (cqr->retries < 0) { 162 if (cqr->retries < 0) {
163 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p " 163 DEV_MESSAGE(KERN_WARNING, device, "DIAG start_IO: request %p "
164 "- no retry left)", cqr); 164 "- no retry left)", cqr);
165 cqr->status = DASD_CQR_FAILED; 165 cqr->status = DASD_CQR_ERROR;
166 return -EIO; 166 return -EIO;
167 } 167 }
168 private = (struct dasd_diag_private *) device->private; 168 private = (struct dasd_diag_private *) device->private;
@@ -184,7 +184,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
184 switch (rc) { 184 switch (rc) {
185 case 0: /* Synchronous I/O finished successfully */ 185 case 0: /* Synchronous I/O finished successfully */
186 cqr->stopclk = get_clock(); 186 cqr->stopclk = get_clock();
187 cqr->status = DASD_CQR_DONE; 187 cqr->status = DASD_CQR_SUCCESS;
188 /* Indicate to calling function that only a dasd_schedule_bh() 188 /* Indicate to calling function that only a dasd_schedule_bh()
189 and no timer is needed */ 189 and no timer is needed */
190 rc = -EACCES; 190 rc = -EACCES;
@@ -209,12 +209,12 @@ dasd_diag_term_IO(struct dasd_ccw_req * cqr)
209{ 209{
210 struct dasd_device *device; 210 struct dasd_device *device;
211 211
212 device = cqr->device; 212 device = cqr->startdev;
213 mdsk_term_io(device); 213 mdsk_term_io(device);
214 mdsk_init_io(device, device->bp_block, 0, NULL); 214 mdsk_init_io(device, device->block->bp_block, 0, NULL);
215 cqr->status = DASD_CQR_CLEAR; 215 cqr->status = DASD_CQR_CLEAR_PENDING;
216 cqr->stopclk = get_clock(); 216 cqr->stopclk = get_clock();
217 dasd_schedule_bh(device); 217 dasd_schedule_device_bh(device);
218 return 0; 218 return 0;
219} 219}
220 220
@@ -247,7 +247,7 @@ dasd_ext_handler(__u16 code)
247 return; 247 return;
248 } 248 }
249 cqr = (struct dasd_ccw_req *) ip; 249 cqr = (struct dasd_ccw_req *) ip;
250 device = (struct dasd_device *) cqr->device; 250 device = (struct dasd_device *) cqr->startdev;
251 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 251 if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
252 DEV_MESSAGE(KERN_WARNING, device, 252 DEV_MESSAGE(KERN_WARNING, device,
253 " magic number of dasd_ccw_req 0x%08X doesn't" 253 " magic number of dasd_ccw_req 0x%08X doesn't"
@@ -260,10 +260,10 @@ dasd_ext_handler(__u16 code)
260 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 260 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
261 261
262 /* Check for a pending clear operation */ 262 /* Check for a pending clear operation */
263 if (cqr->status == DASD_CQR_CLEAR) { 263 if (cqr->status == DASD_CQR_CLEAR_PENDING) {
264 cqr->status = DASD_CQR_QUEUED; 264 cqr->status = DASD_CQR_CLEARED;
265 dasd_clear_timer(device); 265 dasd_device_clear_timer(device);
266 dasd_schedule_bh(device); 266 dasd_schedule_device_bh(device);
267 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 267 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
268 return; 268 return;
269 } 269 }
@@ -272,11 +272,11 @@ dasd_ext_handler(__u16 code)
272 272
273 expires = 0; 273 expires = 0;
274 if (status == 0) { 274 if (status == 0) {
275 cqr->status = DASD_CQR_DONE; 275 cqr->status = DASD_CQR_SUCCESS;
276 /* Start first request on queue if possible -> fast_io. */ 276 /* Start first request on queue if possible -> fast_io. */
277 if (!list_empty(&device->ccw_queue)) { 277 if (!list_empty(&device->ccw_queue)) {
278 next = list_entry(device->ccw_queue.next, 278 next = list_entry(device->ccw_queue.next,
279 struct dasd_ccw_req, list); 279 struct dasd_ccw_req, devlist);
280 if (next->status == DASD_CQR_QUEUED) { 280 if (next->status == DASD_CQR_QUEUED) {
281 rc = dasd_start_diag(next); 281 rc = dasd_start_diag(next);
282 if (rc == 0) 282 if (rc == 0)
@@ -296,10 +296,10 @@ dasd_ext_handler(__u16 code)
296 } 296 }
297 297
298 if (expires != 0) 298 if (expires != 0)
299 dasd_set_timer(device, expires); 299 dasd_device_set_timer(device, expires);
300 else 300 else
301 dasd_clear_timer(device); 301 dasd_device_clear_timer(device);
302 dasd_schedule_bh(device); 302 dasd_schedule_device_bh(device);
303 303
304 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 304 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
305} 305}
@@ -309,6 +309,7 @@ dasd_ext_handler(__u16 code)
309static int 309static int
310dasd_diag_check_device(struct dasd_device *device) 310dasd_diag_check_device(struct dasd_device *device)
311{ 311{
312 struct dasd_block *block;
312 struct dasd_diag_private *private; 313 struct dasd_diag_private *private;
313 struct dasd_diag_characteristics *rdc_data; 314 struct dasd_diag_characteristics *rdc_data;
314 struct dasd_diag_bio bio; 315 struct dasd_diag_bio bio;
@@ -328,6 +329,16 @@ dasd_diag_check_device(struct dasd_device *device)
328 ccw_device_get_id(device->cdev, &private->dev_id); 329 ccw_device_get_id(device->cdev, &private->dev_id);
329 device->private = (void *) private; 330 device->private = (void *) private;
330 } 331 }
332 block = dasd_alloc_block();
333 if (IS_ERR(block)) {
334 DEV_MESSAGE(KERN_WARNING, device, "%s",
335 "could not allocate dasd block structure");
336 kfree(device->private);
337 return PTR_ERR(block);
338 }
339 device->block = block;
340 block->base = device;
341
331 /* Read Device Characteristics */ 342 /* Read Device Characteristics */
332 rdc_data = (void *) &(private->rdc_data); 343 rdc_data = (void *) &(private->rdc_data);
333 rdc_data->dev_nr = private->dev_id.devno; 344 rdc_data->dev_nr = private->dev_id.devno;
@@ -409,14 +420,14 @@ dasd_diag_check_device(struct dasd_device *device)
409 sizeof(DASD_DIAG_CMS1)) == 0) { 420 sizeof(DASD_DIAG_CMS1)) == 0) {
410 /* get formatted blocksize from label block */ 421 /* get formatted blocksize from label block */
411 bsize = (unsigned int) label->block_size; 422 bsize = (unsigned int) label->block_size;
412 device->blocks = (unsigned long) label->block_count; 423 block->blocks = (unsigned long) label->block_count;
413 } else 424 } else
414 device->blocks = end_block; 425 block->blocks = end_block;
415 device->bp_block = bsize; 426 block->bp_block = bsize;
416 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 427 block->s2b_shift = 0; /* bits to shift 512 to get a block */
417 for (sb = 512; sb < bsize; sb = sb << 1) 428 for (sb = 512; sb < bsize; sb = sb << 1)
418 device->s2b_shift++; 429 block->s2b_shift++;
419 rc = mdsk_init_io(device, device->bp_block, 0, NULL); 430 rc = mdsk_init_io(device, block->bp_block, 0, NULL);
420 if (rc) { 431 if (rc) {
421 DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization " 432 DEV_MESSAGE(KERN_WARNING, device, "DIAG initialization "
422 "failed (rc=%d)", rc); 433 "failed (rc=%d)", rc);
@@ -424,9 +435,9 @@ dasd_diag_check_device(struct dasd_device *device)
424 } else { 435 } else {
425 DEV_MESSAGE(KERN_INFO, device, 436 DEV_MESSAGE(KERN_INFO, device,
426 "(%ld B/blk): %ldkB", 437 "(%ld B/blk): %ldkB",
427 (unsigned long) device->bp_block, 438 (unsigned long) block->bp_block,
428 (unsigned long) (device->blocks << 439 (unsigned long) (block->blocks <<
429 device->s2b_shift) >> 1); 440 block->s2b_shift) >> 1);
430 } 441 }
431out: 442out:
432 free_page((long) label); 443 free_page((long) label);
@@ -436,22 +447,16 @@ out:
436/* Fill in virtual disk geometry for device. Return zero on success, non-zero 447/* Fill in virtual disk geometry for device. Return zero on success, non-zero
437 * otherwise. */ 448 * otherwise. */
438static int 449static int
439dasd_diag_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 450dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
440{ 451{
441 if (dasd_check_blocksize(device->bp_block) != 0) 452 if (dasd_check_blocksize(block->bp_block) != 0)
442 return -EINVAL; 453 return -EINVAL;
443 geo->cylinders = (device->blocks << device->s2b_shift) >> 10; 454 geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
444 geo->heads = 16; 455 geo->heads = 16;
445 geo->sectors = 128 >> device->s2b_shift; 456 geo->sectors = 128 >> block->s2b_shift;
446 return 0; 457 return 0;
447} 458}
448 459
449static dasd_era_t
450dasd_diag_examine_error(struct dasd_ccw_req * cqr, struct irb * stat)
451{
452 return dasd_era_fatal;
453}
454
455static dasd_erp_fn_t 460static dasd_erp_fn_t
456dasd_diag_erp_action(struct dasd_ccw_req * cqr) 461dasd_diag_erp_action(struct dasd_ccw_req * cqr)
457{ 462{
@@ -466,8 +471,9 @@ dasd_diag_erp_postaction(struct dasd_ccw_req * cqr)
466 471
467/* Create DASD request from block device request. Return pointer to new 472/* Create DASD request from block device request. Return pointer to new
468 * request on success, ERR_PTR otherwise. */ 473 * request on success, ERR_PTR otherwise. */
469static struct dasd_ccw_req * 474static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
470dasd_diag_build_cp(struct dasd_device * device, struct request *req) 475 struct dasd_block *block,
476 struct request *req)
471{ 477{
472 struct dasd_ccw_req *cqr; 478 struct dasd_ccw_req *cqr;
473 struct dasd_diag_req *dreq; 479 struct dasd_diag_req *dreq;
@@ -486,17 +492,17 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
486 rw_cmd = MDSK_WRITE_REQ; 492 rw_cmd = MDSK_WRITE_REQ;
487 else 493 else
488 return ERR_PTR(-EINVAL); 494 return ERR_PTR(-EINVAL);
489 blksize = device->bp_block; 495 blksize = block->bp_block;
490 /* Calculate record id of first and last block. */ 496 /* Calculate record id of first and last block. */
491 first_rec = req->sector >> device->s2b_shift; 497 first_rec = req->sector >> block->s2b_shift;
492 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 498 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
493 /* Check struct bio and count the number of blocks for the request. */ 499 /* Check struct bio and count the number of blocks for the request. */
494 count = 0; 500 count = 0;
495 rq_for_each_segment(bv, req, iter) { 501 rq_for_each_segment(bv, req, iter) {
496 if (bv->bv_len & (blksize - 1)) 502 if (bv->bv_len & (blksize - 1))
497 /* Fba can only do full blocks. */ 503 /* Fba can only do full blocks. */
498 return ERR_PTR(-EINVAL); 504 return ERR_PTR(-EINVAL);
499 count += bv->bv_len >> (device->s2b_shift + 9); 505 count += bv->bv_len >> (block->s2b_shift + 9);
500 } 506 }
501 /* Paranoia. */ 507 /* Paranoia. */
502 if (count != last_rec - first_rec + 1) 508 if (count != last_rec - first_rec + 1)
@@ -505,7 +511,7 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
505 datasize = sizeof(struct dasd_diag_req) + 511 datasize = sizeof(struct dasd_diag_req) +
506 count*sizeof(struct dasd_diag_bio); 512 count*sizeof(struct dasd_diag_bio);
507 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0, 513 cqr = dasd_smalloc_request(dasd_diag_discipline.name, 0,
508 datasize, device); 514 datasize, memdev);
509 if (IS_ERR(cqr)) 515 if (IS_ERR(cqr))
510 return cqr; 516 return cqr;
511 517
@@ -529,7 +535,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
529 cqr->buildclk = get_clock(); 535 cqr->buildclk = get_clock();
530 if (req->cmd_flags & REQ_FAILFAST) 536 if (req->cmd_flags & REQ_FAILFAST)
531 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 537 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
532 cqr->device = device; 538 cqr->startdev = memdev;
539 cqr->memdev = memdev;
540 cqr->block = block;
533 cqr->expires = DIAG_TIMEOUT; 541 cqr->expires = DIAG_TIMEOUT;
534 cqr->status = DASD_CQR_FILLED; 542 cqr->status = DASD_CQR_FILLED;
535 return cqr; 543 return cqr;
@@ -543,10 +551,15 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
543 int status; 551 int status;
544 552
545 status = cqr->status == DASD_CQR_DONE; 553 status = cqr->status == DASD_CQR_DONE;
546 dasd_sfree_request(cqr, cqr->device); 554 dasd_sfree_request(cqr, cqr->memdev);
547 return status; 555 return status;
548} 556}
549 557
558static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
559{
560 cqr->status = DASD_CQR_FILLED;
561};
562
550/* Fill in IOCTL data for device. */ 563/* Fill in IOCTL data for device. */
551static int 564static int
552dasd_diag_fill_info(struct dasd_device * device, 565dasd_diag_fill_info(struct dasd_device * device,
@@ -583,7 +596,7 @@ static struct dasd_discipline dasd_diag_discipline = {
583 .fill_geometry = dasd_diag_fill_geometry, 596 .fill_geometry = dasd_diag_fill_geometry,
584 .start_IO = dasd_start_diag, 597 .start_IO = dasd_start_diag,
585 .term_IO = dasd_diag_term_IO, 598 .term_IO = dasd_diag_term_IO,
586 .examine_error = dasd_diag_examine_error, 599 .handle_terminated_request = dasd_diag_handle_terminated_request,
587 .erp_action = dasd_diag_erp_action, 600 .erp_action = dasd_diag_erp_action,
588 .erp_postaction = dasd_diag_erp_postaction, 601 .erp_postaction = dasd_diag_erp_postaction,
589 .build_cp = dasd_diag_build_cp, 602 .build_cp = dasd_diag_build_cp,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6038d9195e27..61f16937c1e0 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -52,16 +52,6 @@ MODULE_LICENSE("GPL");
52 52
53static struct dasd_discipline dasd_eckd_discipline; 53static struct dasd_discipline dasd_eckd_discipline;
54 54
55struct dasd_eckd_private {
56 struct dasd_eckd_characteristics rdc_data;
57 struct dasd_eckd_confdata conf_data;
58 struct dasd_eckd_path path_data;
59 struct eckd_count count_area[5];
60 int init_cqr_status;
61 int uses_cdl;
62 struct attrib_data_t attrib; /* e.g. cache operations */
63};
64
65/* The ccw bus type uses this table to find devices that it sends to 55/* The ccw bus type uses this table to find devices that it sends to
66 * dasd_eckd_probe */ 56 * dasd_eckd_probe */
67static struct ccw_device_id dasd_eckd_ids[] = { 57static struct ccw_device_id dasd_eckd_ids[] = {
@@ -188,7 +178,7 @@ check_XRC (struct ccw1 *de_ccw,
188 if (rc == -ENOSYS || rc == -EACCES) 178 if (rc == -ENOSYS || rc == -EACCES)
189 rc = 0; 179 rc = 0;
190 180
191 de_ccw->count = sizeof (struct DE_eckd_data); 181 de_ccw->count = sizeof(struct DE_eckd_data);
192 de_ccw->flags |= CCW_FLAG_SLI; 182 de_ccw->flags |= CCW_FLAG_SLI;
193 return rc; 183 return rc;
194} 184}
@@ -208,7 +198,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
208 ccw->count = 16; 198 ccw->count = 16;
209 ccw->cda = (__u32) __pa(data); 199 ccw->cda = (__u32) __pa(data);
210 200
211 memset(data, 0, sizeof (struct DE_eckd_data)); 201 memset(data, 0, sizeof(struct DE_eckd_data));
212 switch (cmd) { 202 switch (cmd) {
213 case DASD_ECKD_CCW_READ_HOME_ADDRESS: 203 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
214 case DASD_ECKD_CCW_READ_RECORD_ZERO: 204 case DASD_ECKD_CCW_READ_RECORD_ZERO:
@@ -280,6 +270,132 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
280 return rc; 270 return rc;
281} 271}
282 272
273static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
274 struct dasd_device *device)
275{
276 struct dasd_eckd_private *private;
277 int rc;
278
279 private = (struct dasd_eckd_private *) device->private;
280 if (!private->rdc_data.facilities.XRC_supported)
281 return 0;
282
283 /* switch on System Time Stamp - needed for XRC Support */
284 pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */
285 pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
286 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
287
288 rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
289 /* Ignore return code if sync clock is switched off. */
290 if (rc == -ENOSYS || rc == -EACCES)
291 rc = 0;
292 return rc;
293}
294
295static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
296 int totrk, int cmd, struct dasd_device *basedev,
297 struct dasd_device *startdev)
298{
299 struct dasd_eckd_private *basepriv, *startpriv;
300 struct DE_eckd_data *data;
301 struct ch_t geo, beg, end;
302 int rc = 0;
303
304 basepriv = (struct dasd_eckd_private *) basedev->private;
305 startpriv = (struct dasd_eckd_private *) startdev->private;
306 data = &pfxdata->define_extend;
307
308 ccw->cmd_code = DASD_ECKD_CCW_PFX;
309 ccw->flags = 0;
310 ccw->count = sizeof(*pfxdata);
311 ccw->cda = (__u32) __pa(pfxdata);
312
313 memset(pfxdata, 0, sizeof(*pfxdata));
314 /* prefix data */
315 pfxdata->format = 0;
316 pfxdata->base_address = basepriv->conf_data.ned1.unit_addr;
317 pfxdata->base_lss = basepriv->conf_data.ned1.ID;
318 pfxdata->validity.define_extend = 1;
319
320 /* private uid is kept up to date, conf_data may be outdated */
321 if (startpriv->uid.type != UA_BASE_DEVICE) {
322 pfxdata->validity.verify_base = 1;
323 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
324 pfxdata->validity.hyper_pav = 1;
325 }
326
327 /* define extend data (mostly)*/
328 switch (cmd) {
329 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
330 case DASD_ECKD_CCW_READ_RECORD_ZERO:
331 case DASD_ECKD_CCW_READ:
332 case DASD_ECKD_CCW_READ_MT:
333 case DASD_ECKD_CCW_READ_CKD:
334 case DASD_ECKD_CCW_READ_CKD_MT:
335 case DASD_ECKD_CCW_READ_KD:
336 case DASD_ECKD_CCW_READ_KD_MT:
337 case DASD_ECKD_CCW_READ_COUNT:
338 data->mask.perm = 0x1;
339 data->attributes.operation = basepriv->attrib.operation;
340 break;
341 case DASD_ECKD_CCW_WRITE:
342 case DASD_ECKD_CCW_WRITE_MT:
343 case DASD_ECKD_CCW_WRITE_KD:
344 case DASD_ECKD_CCW_WRITE_KD_MT:
345 data->mask.perm = 0x02;
346 data->attributes.operation = basepriv->attrib.operation;
347 rc = check_XRC_on_prefix(pfxdata, basedev);
348 break;
349 case DASD_ECKD_CCW_WRITE_CKD:
350 case DASD_ECKD_CCW_WRITE_CKD_MT:
351 data->attributes.operation = DASD_BYPASS_CACHE;
352 rc = check_XRC_on_prefix(pfxdata, basedev);
353 break;
354 case DASD_ECKD_CCW_ERASE:
355 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
356 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
357 data->mask.perm = 0x3;
358 data->mask.auth = 0x1;
359 data->attributes.operation = DASD_BYPASS_CACHE;
360 rc = check_XRC_on_prefix(pfxdata, basedev);
361 break;
362 default:
363 DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
364 break;
365 }
366
367 data->attributes.mode = 0x3; /* ECKD */
368
369 if ((basepriv->rdc_data.cu_type == 0x2105 ||
370 basepriv->rdc_data.cu_type == 0x2107 ||
371 basepriv->rdc_data.cu_type == 0x1750)
372 && !(basepriv->uses_cdl && trk < 2))
373 data->ga_extended |= 0x40; /* Regular Data Format Mode */
374
375 geo.cyl = basepriv->rdc_data.no_cyl;
376 geo.head = basepriv->rdc_data.trk_per_cyl;
377 beg.cyl = trk / geo.head;
378 beg.head = trk % geo.head;
379 end.cyl = totrk / geo.head;
380 end.head = totrk % geo.head;
381
382 /* check for sequential prestage - enhance cylinder range */
383 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
384 data->attributes.operation == DASD_SEQ_ACCESS) {
385
386 if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
387 end.cyl += basepriv->attrib.nr_cyl;
388 else
389 end.cyl = (geo.cyl - 1);
390 }
391
392 data->beg_ext.cyl = beg.cyl;
393 data->beg_ext.head = beg.head;
394 data->end_ext.cyl = end.cyl;
395 data->end_ext.head = end.head;
396 return rc;
397}
398
283static void 399static void
284locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, 400locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
285 int rec_on_trk, int no_rec, int cmd, 401 int rec_on_trk, int no_rec, int cmd,
@@ -300,7 +416,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
300 ccw->count = 16; 416 ccw->count = 16;
301 ccw->cda = (__u32) __pa(data); 417 ccw->cda = (__u32) __pa(data);
302 418
303 memset(data, 0, sizeof (struct LO_eckd_data)); 419 memset(data, 0, sizeof(struct LO_eckd_data));
304 sector = 0; 420 sector = 0;
305 if (rec_on_trk) { 421 if (rec_on_trk) {
306 switch (private->rdc_data.dev_type) { 422 switch (private->rdc_data.dev_type) {
@@ -441,12 +557,15 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
441 sizeof(uid->serial) - 1); 557 sizeof(uid->serial) - 1);
442 EBCASC(uid->serial, sizeof(uid->serial) - 1); 558 EBCASC(uid->serial, sizeof(uid->serial) - 1);
443 uid->ssid = confdata->neq.subsystemID; 559 uid->ssid = confdata->neq.subsystemID;
444 if (confdata->ned2.sneq.flags == 0x40) { 560 uid->real_unit_addr = confdata->ned1.unit_addr;
445 uid->alias = 1; 561 if (confdata->ned2.sneq.flags == 0x40 &&
446 uid->unit_addr = confdata->ned2.sneq.base_unit_addr; 562 confdata->ned2.sneq.format == 0x0001) {
447 } else 563 uid->type = confdata->ned2.sneq.sua_flags;
448 uid->unit_addr = confdata->ned1.unit_addr; 564 if (uid->type == UA_BASE_PAV_ALIAS)
449 565 uid->base_unit_addr = confdata->ned2.sneq.base_unit_addr;
566 } else {
567 uid->type = UA_BASE_DEVICE;
568 }
450 return 0; 569 return 0;
451} 570}
452 571
@@ -470,7 +589,9 @@ static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
470 ccw->cda = (__u32)(addr_t)rcd_buffer; 589 ccw->cda = (__u32)(addr_t)rcd_buffer;
471 ccw->count = ciw->count; 590 ccw->count = ciw->count;
472 591
473 cqr->device = device; 592 cqr->startdev = device;
593 cqr->memdev = device;
594 cqr->block = NULL;
474 cqr->expires = 10*HZ; 595 cqr->expires = 10*HZ;
475 cqr->lpm = lpm; 596 cqr->lpm = lpm;
476 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 597 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -511,7 +632,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
511 /* 632 /*
512 * on success we update the user input parms 633 * on success we update the user input parms
513 */ 634 */
514 dasd_sfree_request(cqr, cqr->device); 635 dasd_sfree_request(cqr, cqr->memdev);
515 if (ret) 636 if (ret)
516 goto out_error; 637 goto out_error;
517 638
@@ -557,19 +678,19 @@ dasd_eckd_read_conf(struct dasd_device *device)
557 "data retrieved"); 678 "data retrieved");
558 continue; /* no error */ 679 continue; /* no error */
559 } 680 }
560 if (conf_len != sizeof (struct dasd_eckd_confdata)) { 681 if (conf_len != sizeof(struct dasd_eckd_confdata)) {
561 MESSAGE(KERN_WARNING, 682 MESSAGE(KERN_WARNING,
562 "sizes of configuration data mismatch" 683 "sizes of configuration data mismatch"
563 "%d (read) vs %ld (expected)", 684 "%d (read) vs %ld (expected)",
564 conf_len, 685 conf_len,
565 sizeof (struct dasd_eckd_confdata)); 686 sizeof(struct dasd_eckd_confdata));
566 kfree(conf_data); 687 kfree(conf_data);
567 continue; /* no error */ 688 continue; /* no error */
568 } 689 }
569 /* save first valid configuration data */ 690 /* save first valid configuration data */
570 if (!conf_data_saved){ 691 if (!conf_data_saved){
571 memcpy(&private->conf_data, conf_data, 692 memcpy(&private->conf_data, conf_data,
572 sizeof (struct dasd_eckd_confdata)); 693 sizeof(struct dasd_eckd_confdata));
573 conf_data_saved++; 694 conf_data_saved++;
574 } 695 }
575 switch (((char *)conf_data)[242] & 0x07){ 696 switch (((char *)conf_data)[242] & 0x07){
@@ -586,39 +707,104 @@ dasd_eckd_read_conf(struct dasd_device *device)
586 return 0; 707 return 0;
587} 708}
588 709
710static int dasd_eckd_read_features(struct dasd_device *device)
711{
712 struct dasd_psf_prssd_data *prssdp;
713 struct dasd_rssd_features *features;
714 struct dasd_ccw_req *cqr;
715 struct ccw1 *ccw;
716 int rc;
717 struct dasd_eckd_private *private;
718
719 private = (struct dasd_eckd_private *) device->private;
720 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
721 1 /* PSF */ + 1 /* RSSD */ ,
722 (sizeof(struct dasd_psf_prssd_data) +
723 sizeof(struct dasd_rssd_features)),
724 device);
725 if (IS_ERR(cqr)) {
726 DEV_MESSAGE(KERN_WARNING, device, "%s",
727 "Could not allocate initialization request");
728 return PTR_ERR(cqr);
729 }
730 cqr->startdev = device;
731 cqr->memdev = device;
732 cqr->block = NULL;
733 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
734 cqr->retries = 5;
735 cqr->expires = 10 * HZ;
736
737 /* Prepare for Read Subsystem Data */
738 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
739 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
740 prssdp->order = PSF_ORDER_PRSSD;
741 prssdp->suborder = 0x41; /* Read Feature Codes */
742 /* all other bytes of prssdp must be zero */
743
744 ccw = cqr->cpaddr;
745 ccw->cmd_code = DASD_ECKD_CCW_PSF;
746 ccw->count = sizeof(struct dasd_psf_prssd_data);
747 ccw->flags |= CCW_FLAG_CC;
748 ccw->cda = (__u32)(addr_t) prssdp;
749
750 /* Read Subsystem Data - feature codes */
751 features = (struct dasd_rssd_features *) (prssdp + 1);
752 memset(features, 0, sizeof(struct dasd_rssd_features));
753
754 ccw++;
755 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
756 ccw->count = sizeof(struct dasd_rssd_features);
757 ccw->cda = (__u32)(addr_t) features;
758
759 cqr->buildclk = get_clock();
760 cqr->status = DASD_CQR_FILLED;
761 rc = dasd_sleep_on(cqr);
762 if (rc == 0) {
763 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
764 features = (struct dasd_rssd_features *) (prssdp + 1);
765 memcpy(&private->features, features,
766 sizeof(struct dasd_rssd_features));
767 }
768 dasd_sfree_request(cqr, cqr->memdev);
769 return rc;
770}
771
772
589/* 773/*
590 * Build CP for Perform Subsystem Function - SSC. 774 * Build CP for Perform Subsystem Function - SSC.
591 */ 775 */
592static struct dasd_ccw_req * 776static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
593dasd_eckd_build_psf_ssc(struct dasd_device *device)
594{ 777{
595 struct dasd_ccw_req *cqr; 778 struct dasd_ccw_req *cqr;
596 struct dasd_psf_ssc_data *psf_ssc_data; 779 struct dasd_psf_ssc_data *psf_ssc_data;
597 struct ccw1 *ccw; 780 struct ccw1 *ccw;
598 781
599 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ , 782 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
600 sizeof(struct dasd_psf_ssc_data), 783 sizeof(struct dasd_psf_ssc_data),
601 device); 784 device);
602 785
603 if (IS_ERR(cqr)) { 786 if (IS_ERR(cqr)) {
604 DEV_MESSAGE(KERN_WARNING, device, "%s", 787 DEV_MESSAGE(KERN_WARNING, device, "%s",
605 "Could not allocate PSF-SSC request"); 788 "Could not allocate PSF-SSC request");
606 return cqr; 789 return cqr;
607 } 790 }
608 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data; 791 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
609 psf_ssc_data->order = PSF_ORDER_SSC; 792 psf_ssc_data->order = PSF_ORDER_SSC;
610 psf_ssc_data->suborder = 0x08; 793 psf_ssc_data->suborder = 0x88;
611 794 psf_ssc_data->reserved[0] = 0x88;
612 ccw = cqr->cpaddr; 795
613 ccw->cmd_code = DASD_ECKD_CCW_PSF; 796 ccw = cqr->cpaddr;
614 ccw->cda = (__u32)(addr_t)psf_ssc_data; 797 ccw->cmd_code = DASD_ECKD_CCW_PSF;
615 ccw->count = 66; 798 ccw->cda = (__u32)(addr_t)psf_ssc_data;
616 799 ccw->count = 66;
617 cqr->device = device; 800
618 cqr->expires = 10*HZ; 801 cqr->startdev = device;
619 cqr->buildclk = get_clock(); 802 cqr->memdev = device;
620 cqr->status = DASD_CQR_FILLED; 803 cqr->block = NULL;
621 return cqr; 804 cqr->expires = 10*HZ;
805 cqr->buildclk = get_clock();
806 cqr->status = DASD_CQR_FILLED;
807 return cqr;
622} 808}
623 809
624/* 810/*
@@ -629,28 +815,28 @@ dasd_eckd_build_psf_ssc(struct dasd_device *device)
629static int 815static int
630dasd_eckd_psf_ssc(struct dasd_device *device) 816dasd_eckd_psf_ssc(struct dasd_device *device)
631{ 817{
632 struct dasd_ccw_req *cqr; 818 struct dasd_ccw_req *cqr;
633 int rc; 819 int rc;
634 820
635 cqr = dasd_eckd_build_psf_ssc(device); 821 cqr = dasd_eckd_build_psf_ssc(device);
636 if (IS_ERR(cqr)) 822 if (IS_ERR(cqr))
637 return PTR_ERR(cqr); 823 return PTR_ERR(cqr);
638 824
639 rc = dasd_sleep_on(cqr); 825 rc = dasd_sleep_on(cqr);
640 if (!rc) 826 if (!rc)
641 /* trigger CIO to reprobe devices */ 827 /* trigger CIO to reprobe devices */
642 css_schedule_reprobe(); 828 css_schedule_reprobe();
643 dasd_sfree_request(cqr, cqr->device); 829 dasd_sfree_request(cqr, cqr->memdev);
644 return rc; 830 return rc;
645} 831}
646 832
647/* 833/*
648 * Valide storage server of current device. 834 * Valide storage server of current device.
649 */ 835 */
650static int 836static int dasd_eckd_validate_server(struct dasd_device *device)
651dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
652{ 837{
653 int rc; 838 int rc;
839 struct dasd_eckd_private *private;
654 840
655 /* Currently PAV is the only reason to 'validate' server on LPAR */ 841 /* Currently PAV is the only reason to 'validate' server on LPAR */
656 if (dasd_nopav || MACHINE_IS_VM) 842 if (dasd_nopav || MACHINE_IS_VM)
@@ -659,9 +845,11 @@ dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
659 rc = dasd_eckd_psf_ssc(device); 845 rc = dasd_eckd_psf_ssc(device);
660 /* may be requested feature is not available on server, 846 /* may be requested feature is not available on server,
661 * therefore just report error and go ahead */ 847 * therefore just report error and go ahead */
848 private = (struct dasd_eckd_private *) device->private;
662 DEV_MESSAGE(KERN_INFO, device, 849 DEV_MESSAGE(KERN_INFO, device,
663 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", 850 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
664 uid->vendor, uid->serial, uid->ssid, rc); 851 private->uid.vendor, private->uid.serial,
852 private->uid.ssid, rc);
665 /* RE-Read Configuration Data */ 853 /* RE-Read Configuration Data */
666 return dasd_eckd_read_conf(device); 854 return dasd_eckd_read_conf(device);
667} 855}
@@ -674,9 +862,9 @@ static int
674dasd_eckd_check_characteristics(struct dasd_device *device) 862dasd_eckd_check_characteristics(struct dasd_device *device)
675{ 863{
676 struct dasd_eckd_private *private; 864 struct dasd_eckd_private *private;
677 struct dasd_uid uid; 865 struct dasd_block *block;
678 void *rdc_data; 866 void *rdc_data;
679 int rc; 867 int is_known, rc;
680 868
681 private = (struct dasd_eckd_private *) device->private; 869 private = (struct dasd_eckd_private *) device->private;
682 if (private == NULL) { 870 if (private == NULL) {
@@ -699,27 +887,54 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
699 /* Read Configuration Data */ 887 /* Read Configuration Data */
700 rc = dasd_eckd_read_conf(device); 888 rc = dasd_eckd_read_conf(device);
701 if (rc) 889 if (rc)
702 return rc; 890 goto out_err1;
703 891
704 /* Generate device unique id and register in devmap */ 892 /* Generate device unique id and register in devmap */
705 rc = dasd_eckd_generate_uid(device, &uid); 893 rc = dasd_eckd_generate_uid(device, &private->uid);
706 if (rc) 894 if (rc)
707 return rc; 895 goto out_err1;
708 rc = dasd_set_uid(device->cdev, &uid); 896 dasd_set_uid(device->cdev, &private->uid);
709 if (rc == 1) /* new server found */ 897
710 rc = dasd_eckd_validate_server(device, &uid); 898 if (private->uid.type == UA_BASE_DEVICE) {
899 block = dasd_alloc_block();
900 if (IS_ERR(block)) {
901 DEV_MESSAGE(KERN_WARNING, device, "%s",
902 "could not allocate dasd block structure");
903 rc = PTR_ERR(block);
904 goto out_err1;
905 }
906 device->block = block;
907 block->base = device;
908 }
909
910 /* register lcu with alias handling, enable PAV if this is a new lcu */
911 is_known = dasd_alias_make_device_known_to_lcu(device);
912 if (is_known < 0) {
913 rc = is_known;
914 goto out_err2;
915 }
916 if (!is_known) {
917 /* new lcu found */
918 rc = dasd_eckd_validate_server(device); /* will switch pav on */
919 if (rc)
920 goto out_err3;
921 }
922
923 /* Read Feature Codes */
924 rc = dasd_eckd_read_features(device);
711 if (rc) 925 if (rc)
712 return rc; 926 goto out_err3;
713 927
714 /* Read Device Characteristics */ 928 /* Read Device Characteristics */
715 rdc_data = (void *) &(private->rdc_data); 929 rdc_data = (void *) &(private->rdc_data);
716 memset(rdc_data, 0, sizeof(rdc_data)); 930 memset(rdc_data, 0, sizeof(rdc_data));
717 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64); 931 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
718 if (rc) 932 if (rc) {
719 DEV_MESSAGE(KERN_WARNING, device, 933 DEV_MESSAGE(KERN_WARNING, device,
720 "Read device characteristics returned " 934 "Read device characteristics returned "
721 "rc=%d", rc); 935 "rc=%d", rc);
722 936 goto out_err3;
937 }
723 DEV_MESSAGE(KERN_INFO, device, 938 DEV_MESSAGE(KERN_INFO, device,
724 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d", 939 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
725 private->rdc_data.dev_type, 940 private->rdc_data.dev_type,
@@ -729,9 +944,24 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
729 private->rdc_data.no_cyl, 944 private->rdc_data.no_cyl,
730 private->rdc_data.trk_per_cyl, 945 private->rdc_data.trk_per_cyl,
731 private->rdc_data.sec_per_trk); 946 private->rdc_data.sec_per_trk);
947 return 0;
948
949out_err3:
950 dasd_alias_disconnect_device_from_lcu(device);
951out_err2:
952 dasd_free_block(device->block);
953 device->block = NULL;
954out_err1:
955 kfree(device->private);
956 device->private = NULL;
732 return rc; 957 return rc;
733} 958}
734 959
960static void dasd_eckd_uncheck_device(struct dasd_device *device)
961{
962 dasd_alias_disconnect_device_from_lcu(device);
963}
964
735static struct dasd_ccw_req * 965static struct dasd_ccw_req *
736dasd_eckd_analysis_ccw(struct dasd_device *device) 966dasd_eckd_analysis_ccw(struct dasd_device *device)
737{ 967{
@@ -755,7 +985,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
755 /* Define extent for the first 3 tracks. */ 985 /* Define extent for the first 3 tracks. */
756 define_extent(ccw++, cqr->data, 0, 2, 986 define_extent(ccw++, cqr->data, 0, 2,
757 DASD_ECKD_CCW_READ_COUNT, device); 987 DASD_ECKD_CCW_READ_COUNT, device);
758 LO_data = cqr->data + sizeof (struct DE_eckd_data); 988 LO_data = cqr->data + sizeof(struct DE_eckd_data);
759 /* Locate record for the first 4 records on track 0. */ 989 /* Locate record for the first 4 records on track 0. */
760 ccw[-1].flags |= CCW_FLAG_CC; 990 ccw[-1].flags |= CCW_FLAG_CC;
761 locate_record(ccw++, LO_data++, 0, 0, 4, 991 locate_record(ccw++, LO_data++, 0, 0, 4,
@@ -783,7 +1013,9 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
783 ccw->count = 8; 1013 ccw->count = 8;
784 ccw->cda = (__u32)(addr_t) count_data; 1014 ccw->cda = (__u32)(addr_t) count_data;
785 1015
786 cqr->device = device; 1016 cqr->block = NULL;
1017 cqr->startdev = device;
1018 cqr->memdev = device;
787 cqr->retries = 0; 1019 cqr->retries = 0;
788 cqr->buildclk = get_clock(); 1020 cqr->buildclk = get_clock();
789 cqr->status = DASD_CQR_FILLED; 1021 cqr->status = DASD_CQR_FILLED;
@@ -803,7 +1035,7 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
803 struct dasd_eckd_private *private; 1035 struct dasd_eckd_private *private;
804 struct dasd_device *device; 1036 struct dasd_device *device;
805 1037
806 device = init_cqr->device; 1038 device = init_cqr->startdev;
807 private = (struct dasd_eckd_private *) device->private; 1039 private = (struct dasd_eckd_private *) device->private;
808 private->init_cqr_status = init_cqr->status; 1040 private->init_cqr_status = init_cqr->status;
809 dasd_sfree_request(init_cqr, device); 1041 dasd_sfree_request(init_cqr, device);
@@ -811,13 +1043,13 @@ dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
811} 1043}
812 1044
813static int 1045static int
814dasd_eckd_start_analysis(struct dasd_device *device) 1046dasd_eckd_start_analysis(struct dasd_block *block)
815{ 1047{
816 struct dasd_eckd_private *private; 1048 struct dasd_eckd_private *private;
817 struct dasd_ccw_req *init_cqr; 1049 struct dasd_ccw_req *init_cqr;
818 1050
819 private = (struct dasd_eckd_private *) device->private; 1051 private = (struct dasd_eckd_private *) block->base->private;
820 init_cqr = dasd_eckd_analysis_ccw(device); 1052 init_cqr = dasd_eckd_analysis_ccw(block->base);
821 if (IS_ERR(init_cqr)) 1053 if (IS_ERR(init_cqr))
822 return PTR_ERR(init_cqr); 1054 return PTR_ERR(init_cqr);
823 init_cqr->callback = dasd_eckd_analysis_callback; 1055 init_cqr->callback = dasd_eckd_analysis_callback;
@@ -828,13 +1060,15 @@ dasd_eckd_start_analysis(struct dasd_device *device)
828} 1060}
829 1061
830static int 1062static int
831dasd_eckd_end_analysis(struct dasd_device *device) 1063dasd_eckd_end_analysis(struct dasd_block *block)
832{ 1064{
1065 struct dasd_device *device;
833 struct dasd_eckd_private *private; 1066 struct dasd_eckd_private *private;
834 struct eckd_count *count_area; 1067 struct eckd_count *count_area;
835 unsigned int sb, blk_per_trk; 1068 unsigned int sb, blk_per_trk;
836 int status, i; 1069 int status, i;
837 1070
1071 device = block->base;
838 private = (struct dasd_eckd_private *) device->private; 1072 private = (struct dasd_eckd_private *) device->private;
839 status = private->init_cqr_status; 1073 status = private->init_cqr_status;
840 private->init_cqr_status = -1; 1074 private->init_cqr_status = -1;
@@ -846,7 +1080,7 @@ dasd_eckd_end_analysis(struct dasd_device *device)
846 1080
847 private->uses_cdl = 1; 1081 private->uses_cdl = 1;
848 /* Calculate number of blocks/records per track. */ 1082 /* Calculate number of blocks/records per track. */
849 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); 1083 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
850 /* Check Track 0 for Compatible Disk Layout */ 1084 /* Check Track 0 for Compatible Disk Layout */
851 count_area = NULL; 1085 count_area = NULL;
852 for (i = 0; i < 3; i++) { 1086 for (i = 0; i < 3; i++) {
@@ -876,56 +1110,65 @@ dasd_eckd_end_analysis(struct dasd_device *device)
876 if (count_area != NULL && count_area->kl == 0) { 1110 if (count_area != NULL && count_area->kl == 0) {
877 /* we found notthing violating our disk layout */ 1111 /* we found notthing violating our disk layout */
878 if (dasd_check_blocksize(count_area->dl) == 0) 1112 if (dasd_check_blocksize(count_area->dl) == 0)
879 device->bp_block = count_area->dl; 1113 block->bp_block = count_area->dl;
880 } 1114 }
881 if (device->bp_block == 0) { 1115 if (block->bp_block == 0) {
882 DEV_MESSAGE(KERN_WARNING, device, "%s", 1116 DEV_MESSAGE(KERN_WARNING, device, "%s",
883 "Volume has incompatible disk layout"); 1117 "Volume has incompatible disk layout");
884 return -EMEDIUMTYPE; 1118 return -EMEDIUMTYPE;
885 } 1119 }
886 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 1120 block->s2b_shift = 0; /* bits to shift 512 to get a block */
887 for (sb = 512; sb < device->bp_block; sb = sb << 1) 1121 for (sb = 512; sb < block->bp_block; sb = sb << 1)
888 device->s2b_shift++; 1122 block->s2b_shift++;
889 1123
890 blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block); 1124 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
891 device->blocks = (private->rdc_data.no_cyl * 1125 block->blocks = (private->rdc_data.no_cyl *
892 private->rdc_data.trk_per_cyl * 1126 private->rdc_data.trk_per_cyl *
893 blk_per_trk); 1127 blk_per_trk);
894 1128
895 DEV_MESSAGE(KERN_INFO, device, 1129 DEV_MESSAGE(KERN_INFO, device,
896 "(%dkB blks): %dkB at %dkB/trk %s", 1130 "(%dkB blks): %dkB at %dkB/trk %s",
897 (device->bp_block >> 10), 1131 (block->bp_block >> 10),
898 ((private->rdc_data.no_cyl * 1132 ((private->rdc_data.no_cyl *
899 private->rdc_data.trk_per_cyl * 1133 private->rdc_data.trk_per_cyl *
900 blk_per_trk * (device->bp_block >> 9)) >> 1), 1134 blk_per_trk * (block->bp_block >> 9)) >> 1),
901 ((blk_per_trk * device->bp_block) >> 10), 1135 ((blk_per_trk * block->bp_block) >> 10),
902 private->uses_cdl ? 1136 private->uses_cdl ?
903 "compatible disk layout" : "linux disk layout"); 1137 "compatible disk layout" : "linux disk layout");
904 1138
905 return 0; 1139 return 0;
906} 1140}
907 1141
908static int 1142static int dasd_eckd_do_analysis(struct dasd_block *block)
909dasd_eckd_do_analysis(struct dasd_device *device)
910{ 1143{
911 struct dasd_eckd_private *private; 1144 struct dasd_eckd_private *private;
912 1145
913 private = (struct dasd_eckd_private *) device->private; 1146 private = (struct dasd_eckd_private *) block->base->private;
914 if (private->init_cqr_status < 0) 1147 if (private->init_cqr_status < 0)
915 return dasd_eckd_start_analysis(device); 1148 return dasd_eckd_start_analysis(block);
916 else 1149 else
917 return dasd_eckd_end_analysis(device); 1150 return dasd_eckd_end_analysis(block);
918} 1151}
919 1152
1153static int dasd_eckd_ready_to_online(struct dasd_device *device)
1154{
1155 return dasd_alias_add_device(device);
1156};
1157
1158static int dasd_eckd_online_to_ready(struct dasd_device *device)
1159{
1160 return dasd_alias_remove_device(device);
1161};
1162
920static int 1163static int
921dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 1164dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
922{ 1165{
923 struct dasd_eckd_private *private; 1166 struct dasd_eckd_private *private;
924 1167
925 private = (struct dasd_eckd_private *) device->private; 1168 private = (struct dasd_eckd_private *) block->base->private;
926 if (dasd_check_blocksize(device->bp_block) == 0) { 1169 if (dasd_check_blocksize(block->bp_block) == 0) {
927 geo->sectors = recs_per_track(&private->rdc_data, 1170 geo->sectors = recs_per_track(&private->rdc_data,
928 0, device->bp_block); 1171 0, block->bp_block);
929 } 1172 }
930 geo->cylinders = private->rdc_data.no_cyl; 1173 geo->cylinders = private->rdc_data.no_cyl;
931 geo->heads = private->rdc_data.trk_per_cyl; 1174 geo->heads = private->rdc_data.trk_per_cyl;
@@ -1037,7 +1280,7 @@ dasd_eckd_format_device(struct dasd_device * device,
1037 locate_record(ccw++, (struct LO_eckd_data *) data, 1280 locate_record(ccw++, (struct LO_eckd_data *) data,
1038 fdata->start_unit, 0, rpt + 1, 1281 fdata->start_unit, 0, rpt + 1,
1039 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, 1282 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1040 device->bp_block); 1283 device->block->bp_block);
1041 data += sizeof(struct LO_eckd_data); 1284 data += sizeof(struct LO_eckd_data);
1042 break; 1285 break;
1043 case 0x04: /* Invalidate track. */ 1286 case 0x04: /* Invalidate track. */
@@ -1110,43 +1353,28 @@ dasd_eckd_format_device(struct dasd_device * device,
1110 ccw++; 1353 ccw++;
1111 } 1354 }
1112 } 1355 }
1113 fcp->device = device; 1356 fcp->startdev = device;
1114 fcp->retries = 2; /* set retry counter to enable ERP */ 1357 fcp->memdev = device;
1358 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
1359 fcp->retries = 5; /* set retry counter to enable default ERP */
1115 fcp->buildclk = get_clock(); 1360 fcp->buildclk = get_clock();
1116 fcp->status = DASD_CQR_FILLED; 1361 fcp->status = DASD_CQR_FILLED;
1117 return fcp; 1362 return fcp;
1118} 1363}
1119 1364
1120static dasd_era_t 1365static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1121dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
1122{ 1366{
1123 struct dasd_device *device = (struct dasd_device *) cqr->device; 1367 cqr->status = DASD_CQR_FILLED;
1124 struct ccw_device *cdev = device->cdev; 1368 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1125 1369 dasd_eckd_reset_ccw_to_base_io(cqr);
1126 if (irb->scsw.cstat == 0x00 && 1370 cqr->startdev = cqr->block->base;
1127 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
1128 return dasd_era_none;
1129
1130 switch (cdev->id.cu_type) {
1131 case 0x3990:
1132 case 0x2105:
1133 case 0x2107:
1134 case 0x1750:
1135 return dasd_3990_erp_examine(cqr, irb);
1136 case 0x9343:
1137 return dasd_9343_erp_examine(cqr, irb);
1138 case 0x3880:
1139 default:
1140 DEV_MESSAGE(KERN_WARNING, device, "%s",
1141 "default (unknown CU type) - RECOVERABLE return");
1142 return dasd_era_recover;
1143 } 1371 }
1144} 1372};
1145 1373
1146static dasd_erp_fn_t 1374static dasd_erp_fn_t
1147dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 1375dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1148{ 1376{
1149 struct dasd_device *device = (struct dasd_device *) cqr->device; 1377 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1150 struct ccw_device *cdev = device->cdev; 1378 struct ccw_device *cdev = device->cdev;
1151 1379
1152 switch (cdev->id.cu_type) { 1380 switch (cdev->id.cu_type) {
@@ -1168,8 +1396,37 @@ dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1168 return dasd_default_erp_postaction; 1396 return dasd_default_erp_postaction;
1169} 1397}
1170 1398
1171static struct dasd_ccw_req * 1399
1172dasd_eckd_build_cp(struct dasd_device * device, struct request *req) 1400static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1401 struct irb *irb)
1402{
1403 char mask;
1404
1405 /* first of all check for state change pending interrupt */
1406 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1407 if ((irb->scsw.dstat & mask) == mask) {
1408 dasd_generic_handle_state_change(device);
1409 return;
1410 }
1411
1412 /* summary unit check */
1413 if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && irb->ecw[7] == 0x0D) {
1414 dasd_alias_handle_summary_unit_check(device, irb);
1415 return;
1416 }
1417
1418 /* just report other unsolicited interrupts */
1419 DEV_MESSAGE(KERN_DEBUG, device, "%s",
1420 "unsolicited interrupt received");
1421 device->discipline->dump_sense(device, NULL, irb);
1422 dasd_schedule_device_bh(device);
1423
1424 return;
1425};
1426
1427static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1428 struct dasd_block *block,
1429 struct request *req)
1173{ 1430{
1174 struct dasd_eckd_private *private; 1431 struct dasd_eckd_private *private;
1175 unsigned long *idaws; 1432 unsigned long *idaws;
@@ -1185,8 +1442,11 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1185 sector_t first_trk, last_trk; 1442 sector_t first_trk, last_trk;
1186 unsigned int first_offs, last_offs; 1443 unsigned int first_offs, last_offs;
1187 unsigned char cmd, rcmd; 1444 unsigned char cmd, rcmd;
1445 int use_prefix;
1446 struct dasd_device *basedev;
1188 1447
1189 private = (struct dasd_eckd_private *) device->private; 1448 basedev = block->base;
1449 private = (struct dasd_eckd_private *) basedev->private;
1190 if (rq_data_dir(req) == READ) 1450 if (rq_data_dir(req) == READ)
1191 cmd = DASD_ECKD_CCW_READ_MT; 1451 cmd = DASD_ECKD_CCW_READ_MT;
1192 else if (rq_data_dir(req) == WRITE) 1452 else if (rq_data_dir(req) == WRITE)
@@ -1194,13 +1454,13 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1194 else 1454 else
1195 return ERR_PTR(-EINVAL); 1455 return ERR_PTR(-EINVAL);
1196 /* Calculate number of blocks/records per track. */ 1456 /* Calculate number of blocks/records per track. */
1197 blksize = device->bp_block; 1457 blksize = block->bp_block;
1198 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 1458 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1199 /* Calculate record id of first and last block. */ 1459 /* Calculate record id of first and last block. */
1200 first_rec = first_trk = req->sector >> device->s2b_shift; 1460 first_rec = first_trk = req->sector >> block->s2b_shift;
1201 first_offs = sector_div(first_trk, blk_per_trk); 1461 first_offs = sector_div(first_trk, blk_per_trk);
1202 last_rec = last_trk = 1462 last_rec = last_trk =
1203 (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 1463 (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
1204 last_offs = sector_div(last_trk, blk_per_trk); 1464 last_offs = sector_div(last_trk, blk_per_trk);
1205 /* Check struct bio and count the number of blocks for the request. */ 1465 /* Check struct bio and count the number of blocks for the request. */
1206 count = 0; 1466 count = 0;
@@ -1209,20 +1469,33 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1209 if (bv->bv_len & (blksize - 1)) 1469 if (bv->bv_len & (blksize - 1))
1210 /* Eckd can only do full blocks. */ 1470 /* Eckd can only do full blocks. */
1211 return ERR_PTR(-EINVAL); 1471 return ERR_PTR(-EINVAL);
1212 count += bv->bv_len >> (device->s2b_shift + 9); 1472 count += bv->bv_len >> (block->s2b_shift + 9);
1213#if defined(CONFIG_64BIT) 1473#if defined(CONFIG_64BIT)
1214 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 1474 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1215 cidaw += bv->bv_len >> (device->s2b_shift + 9); 1475 cidaw += bv->bv_len >> (block->s2b_shift + 9);
1216#endif 1476#endif
1217 } 1477 }
1218 /* Paranoia. */ 1478 /* Paranoia. */
1219 if (count != last_rec - first_rec + 1) 1479 if (count != last_rec - first_rec + 1)
1220 return ERR_PTR(-EINVAL); 1480 return ERR_PTR(-EINVAL);
1221 /* 1x define extent + 1x locate record + number of blocks */ 1481
1222 cplength = 2 + count; 1482 /* use the prefix command if available */
1223 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 1483 use_prefix = private->features.feature[8] & 0x01;
1224 datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) + 1484 if (use_prefix) {
1225 cidaw * sizeof(unsigned long); 1485 /* 1x prefix + number of blocks */
1486 cplength = 2 + count;
1487 /* 1x prefix + cidaws*sizeof(long) */
1488 datasize = sizeof(struct PFX_eckd_data) +
1489 sizeof(struct LO_eckd_data) +
1490 cidaw * sizeof(unsigned long);
1491 } else {
1492 /* 1x define extent + 1x locate record + number of blocks */
1493 cplength = 2 + count;
1494 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1495 datasize = sizeof(struct DE_eckd_data) +
1496 sizeof(struct LO_eckd_data) +
1497 cidaw * sizeof(unsigned long);
1498 }
1226 /* Find out the number of additional locate record ccws for cdl. */ 1499 /* Find out the number of additional locate record ccws for cdl. */
1227 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 1500 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1228 if (last_rec >= 2*blk_per_trk) 1501 if (last_rec >= 2*blk_per_trk)
@@ -1232,26 +1505,42 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1232 } 1505 }
1233 /* Allocate the ccw request. */ 1506 /* Allocate the ccw request. */
1234 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1507 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1235 cplength, datasize, device); 1508 cplength, datasize, startdev);
1236 if (IS_ERR(cqr)) 1509 if (IS_ERR(cqr))
1237 return cqr; 1510 return cqr;
1238 ccw = cqr->cpaddr; 1511 ccw = cqr->cpaddr;
1239 /* First ccw is define extent. */ 1512 /* First ccw is define extent or prefix. */
1240 if (define_extent(ccw++, cqr->data, first_trk, 1513 if (use_prefix) {
1241 last_trk, cmd, device) == -EAGAIN) { 1514 if (prefix(ccw++, cqr->data, first_trk,
1242 /* Clock not in sync and XRC is enabled. Try again later. */ 1515 last_trk, cmd, basedev, startdev) == -EAGAIN) {
1243 dasd_sfree_request(cqr, device); 1516 /* Clock not in sync and XRC is enabled.
1244 return ERR_PTR(-EAGAIN); 1517 * Try again later.
1518 */
1519 dasd_sfree_request(cqr, startdev);
1520 return ERR_PTR(-EAGAIN);
1521 }
1522 idaws = (unsigned long *) (cqr->data +
1523 sizeof(struct PFX_eckd_data));
1524 } else {
1525 if (define_extent(ccw++, cqr->data, first_trk,
1526 last_trk, cmd, startdev) == -EAGAIN) {
1527 /* Clock not in sync and XRC is enabled.
1528 * Try again later.
1529 */
1530 dasd_sfree_request(cqr, startdev);
1531 return ERR_PTR(-EAGAIN);
1532 }
1533 idaws = (unsigned long *) (cqr->data +
1534 sizeof(struct DE_eckd_data));
1245 } 1535 }
1246 /* Build locate_record+read/write/ccws. */ 1536 /* Build locate_record+read/write/ccws. */
1247 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1248 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 1537 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1249 recid = first_rec; 1538 recid = first_rec;
1250 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 1539 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1251 /* Only standard blocks so there is just one locate record. */ 1540 /* Only standard blocks so there is just one locate record. */
1252 ccw[-1].flags |= CCW_FLAG_CC; 1541 ccw[-1].flags |= CCW_FLAG_CC;
1253 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 1542 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1254 last_rec - recid + 1, cmd, device, blksize); 1543 last_rec - recid + 1, cmd, basedev, blksize);
1255 } 1544 }
1256 rq_for_each_segment(bv, req, iter) { 1545 rq_for_each_segment(bv, req, iter) {
1257 dst = page_address(bv->bv_page) + bv->bv_offset; 1546 dst = page_address(bv->bv_page) + bv->bv_offset;
@@ -1281,7 +1570,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1281 ccw[-1].flags |= CCW_FLAG_CC; 1570 ccw[-1].flags |= CCW_FLAG_CC;
1282 locate_record(ccw++, LO_data++, 1571 locate_record(ccw++, LO_data++,
1283 trkid, recoffs + 1, 1572 trkid, recoffs + 1,
1284 1, rcmd, device, count); 1573 1, rcmd, basedev, count);
1285 } 1574 }
1286 /* Locate record for standard blocks ? */ 1575 /* Locate record for standard blocks ? */
1287 if (private->uses_cdl && recid == 2*blk_per_trk) { 1576 if (private->uses_cdl && recid == 2*blk_per_trk) {
@@ -1289,7 +1578,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1289 locate_record(ccw++, LO_data++, 1578 locate_record(ccw++, LO_data++,
1290 trkid, recoffs + 1, 1579 trkid, recoffs + 1,
1291 last_rec - recid + 1, 1580 last_rec - recid + 1,
1292 cmd, device, count); 1581 cmd, basedev, count);
1293 } 1582 }
1294 /* Read/write ccw. */ 1583 /* Read/write ccw. */
1295 ccw[-1].flags |= CCW_FLAG_CC; 1584 ccw[-1].flags |= CCW_FLAG_CC;
@@ -1310,7 +1599,9 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1310 } 1599 }
1311 if (req->cmd_flags & REQ_FAILFAST) 1600 if (req->cmd_flags & REQ_FAILFAST)
1312 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1601 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1313 cqr->device = device; 1602 cqr->startdev = startdev;
1603 cqr->memdev = startdev;
1604 cqr->block = block;
1314 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1605 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1315 cqr->lpm = private->path_data.ppm; 1606 cqr->lpm = private->path_data.ppm;
1316 cqr->retries = 256; 1607 cqr->retries = 256;
@@ -1333,10 +1624,10 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1333 1624
1334 if (!dasd_page_cache) 1625 if (!dasd_page_cache)
1335 goto out; 1626 goto out;
1336 private = (struct dasd_eckd_private *) cqr->device->private; 1627 private = (struct dasd_eckd_private *) cqr->block->base->private;
1337 blksize = cqr->device->bp_block; 1628 blksize = cqr->block->bp_block;
1338 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 1629 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1339 recid = req->sector >> cqr->device->s2b_shift; 1630 recid = req->sector >> cqr->block->s2b_shift;
1340 ccw = cqr->cpaddr; 1631 ccw = cqr->cpaddr;
1341 /* Skip over define extent & locate record. */ 1632 /* Skip over define extent & locate record. */
1342 ccw++; 1633 ccw++;
@@ -1367,10 +1658,71 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1367 } 1658 }
1368out: 1659out:
1369 status = cqr->status == DASD_CQR_DONE; 1660 status = cqr->status == DASD_CQR_DONE;
1370 dasd_sfree_request(cqr, cqr->device); 1661 dasd_sfree_request(cqr, cqr->memdev);
1371 return status; 1662 return status;
1372} 1663}
1373 1664
1665/*
1666 * Modify ccw chain in cqr so it can be started on a base device.
1667 *
1668 * Note that this is not enough to restart the cqr!
1669 * Either reset cqr->startdev as well (summary unit check handling)
1670 * or restart via separate cqr (as in ERP handling).
1671 */
1672void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
1673{
1674 struct ccw1 *ccw;
1675 struct PFX_eckd_data *pfxdata;
1676
1677 ccw = cqr->cpaddr;
1678 pfxdata = cqr->data;
1679
1680 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
1681 pfxdata->validity.verify_base = 0;
1682 pfxdata->validity.hyper_pav = 0;
1683 }
1684}
1685
1686#define DASD_ECKD_CHANQ_MAX_SIZE 4
1687
1688static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
1689 struct dasd_block *block,
1690 struct request *req)
1691{
1692 struct dasd_eckd_private *private;
1693 struct dasd_device *startdev;
1694 unsigned long flags;
1695 struct dasd_ccw_req *cqr;
1696
1697 startdev = dasd_alias_get_start_dev(base);
1698 if (!startdev)
1699 startdev = base;
1700 private = (struct dasd_eckd_private *) startdev->private;
1701 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
1702 return ERR_PTR(-EBUSY);
1703
1704 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
1705 private->count++;
1706 cqr = dasd_eckd_build_cp(startdev, block, req);
1707 if (IS_ERR(cqr))
1708 private->count--;
1709 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
1710 return cqr;
1711}
1712
1713static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
1714 struct request *req)
1715{
1716 struct dasd_eckd_private *private;
1717 unsigned long flags;
1718
1719 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
1720 private = (struct dasd_eckd_private *) cqr->memdev->private;
1721 private->count--;
1722 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
1723 return dasd_eckd_free_cp(cqr, req);
1724}
1725
1374static int 1726static int
1375dasd_eckd_fill_info(struct dasd_device * device, 1727dasd_eckd_fill_info(struct dasd_device * device,
1376 struct dasd_information2_t * info) 1728 struct dasd_information2_t * info)
@@ -1384,9 +1736,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
1384 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 1736 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1385 memcpy(info->characteristics, &private->rdc_data, 1737 memcpy(info->characteristics, &private->rdc_data,
1386 sizeof(struct dasd_eckd_characteristics)); 1738 sizeof(struct dasd_eckd_characteristics));
1387 info->confdata_size = sizeof (struct dasd_eckd_confdata); 1739 info->confdata_size = sizeof(struct dasd_eckd_confdata);
1388 memcpy(info->configuration_data, &private->conf_data, 1740 memcpy(info->configuration_data, &private->conf_data,
1389 sizeof (struct dasd_eckd_confdata)); 1741 sizeof(struct dasd_eckd_confdata));
1390 return 0; 1742 return 0;
1391} 1743}
1392 1744
@@ -1419,7 +1771,8 @@ dasd_eckd_release(struct dasd_device *device)
1419 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1771 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1420 cqr->cpaddr->count = 32; 1772 cqr->cpaddr->count = 32;
1421 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1773 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1422 cqr->device = device; 1774 cqr->startdev = device;
1775 cqr->memdev = device;
1423 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1776 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1424 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1777 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1425 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1778 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1429,7 +1782,7 @@ dasd_eckd_release(struct dasd_device *device)
1429 1782
1430 rc = dasd_sleep_on_immediatly(cqr); 1783 rc = dasd_sleep_on_immediatly(cqr);
1431 1784
1432 dasd_sfree_request(cqr, cqr->device); 1785 dasd_sfree_request(cqr, cqr->memdev);
1433 return rc; 1786 return rc;
1434} 1787}
1435 1788
@@ -1459,7 +1812,8 @@ dasd_eckd_reserve(struct dasd_device *device)
1459 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1812 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1460 cqr->cpaddr->count = 32; 1813 cqr->cpaddr->count = 32;
1461 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1814 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1462 cqr->device = device; 1815 cqr->startdev = device;
1816 cqr->memdev = device;
1463 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1817 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1464 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1818 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1465 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1819 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1469,7 +1823,7 @@ dasd_eckd_reserve(struct dasd_device *device)
1469 1823
1470 rc = dasd_sleep_on_immediatly(cqr); 1824 rc = dasd_sleep_on_immediatly(cqr);
1471 1825
1472 dasd_sfree_request(cqr, cqr->device); 1826 dasd_sfree_request(cqr, cqr->memdev);
1473 return rc; 1827 return rc;
1474} 1828}
1475 1829
@@ -1498,7 +1852,8 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1498 cqr->cpaddr->flags |= CCW_FLAG_SLI; 1852 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1499 cqr->cpaddr->count = 32; 1853 cqr->cpaddr->count = 32;
1500 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data; 1854 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1501 cqr->device = device; 1855 cqr->startdev = device;
1856 cqr->memdev = device;
1502 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1857 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1503 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1858 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1504 cqr->retries = 2; /* set retry counter to enable basic ERP */ 1859 cqr->retries = 2; /* set retry counter to enable basic ERP */
@@ -1508,7 +1863,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
1508 1863
1509 rc = dasd_sleep_on_immediatly(cqr); 1864 rc = dasd_sleep_on_immediatly(cqr);
1510 1865
1511 dasd_sfree_request(cqr, cqr->device); 1866 dasd_sfree_request(cqr, cqr->memdev);
1512 return rc; 1867 return rc;
1513} 1868}
1514 1869
@@ -1526,52 +1881,52 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1526 1881
1527 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1882 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1528 1 /* PSF */ + 1 /* RSSD */ , 1883 1 /* PSF */ + 1 /* RSSD */ ,
1529 (sizeof (struct dasd_psf_prssd_data) + 1884 (sizeof(struct dasd_psf_prssd_data) +
1530 sizeof (struct dasd_rssd_perf_stats_t)), 1885 sizeof(struct dasd_rssd_perf_stats_t)),
1531 device); 1886 device);
1532 if (IS_ERR(cqr)) { 1887 if (IS_ERR(cqr)) {
1533 DEV_MESSAGE(KERN_WARNING, device, "%s", 1888 DEV_MESSAGE(KERN_WARNING, device, "%s",
1534 "Could not allocate initialization request"); 1889 "Could not allocate initialization request");
1535 return PTR_ERR(cqr); 1890 return PTR_ERR(cqr);
1536 } 1891 }
1537 cqr->device = device; 1892 cqr->startdev = device;
1893 cqr->memdev = device;
1538 cqr->retries = 0; 1894 cqr->retries = 0;
1539 cqr->expires = 10 * HZ; 1895 cqr->expires = 10 * HZ;
1540 1896
1541 /* Prepare for Read Subsystem Data */ 1897 /* Prepare for Read Subsystem Data */
1542 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 1898 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1543 memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data)); 1899 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
1544 prssdp->order = PSF_ORDER_PRSSD; 1900 prssdp->order = PSF_ORDER_PRSSD;
1545 prssdp->suborder = 0x01; /* Performance Statistics */ 1901 prssdp->suborder = 0x01; /* Performance Statistics */
1546 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 1902 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
1547 1903
1548 ccw = cqr->cpaddr; 1904 ccw = cqr->cpaddr;
1549 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1905 ccw->cmd_code = DASD_ECKD_CCW_PSF;
1550 ccw->count = sizeof (struct dasd_psf_prssd_data); 1906 ccw->count = sizeof(struct dasd_psf_prssd_data);
1551 ccw->flags |= CCW_FLAG_CC; 1907 ccw->flags |= CCW_FLAG_CC;
1552 ccw->cda = (__u32)(addr_t) prssdp; 1908 ccw->cda = (__u32)(addr_t) prssdp;
1553 1909
1554 /* Read Subsystem Data - Performance Statistics */ 1910 /* Read Subsystem Data - Performance Statistics */
1555 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 1911 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1556 memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t)); 1912 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
1557 1913
1558 ccw++; 1914 ccw++;
1559 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1915 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1560 ccw->count = sizeof (struct dasd_rssd_perf_stats_t); 1916 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
1561 ccw->cda = (__u32)(addr_t) stats; 1917 ccw->cda = (__u32)(addr_t) stats;
1562 1918
1563 cqr->buildclk = get_clock(); 1919 cqr->buildclk = get_clock();
1564 cqr->status = DASD_CQR_FILLED; 1920 cqr->status = DASD_CQR_FILLED;
1565 rc = dasd_sleep_on(cqr); 1921 rc = dasd_sleep_on(cqr);
1566 if (rc == 0) { 1922 if (rc == 0) {
1567 /* Prepare for Read Subsystem Data */
1568 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 1923 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1569 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 1924 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1570 if (copy_to_user(argp, stats, 1925 if (copy_to_user(argp, stats,
1571 sizeof(struct dasd_rssd_perf_stats_t))) 1926 sizeof(struct dasd_rssd_perf_stats_t)))
1572 rc = -EFAULT; 1927 rc = -EFAULT;
1573 } 1928 }
1574 dasd_sfree_request(cqr, cqr->device); 1929 dasd_sfree_request(cqr, cqr->memdev);
1575 return rc; 1930 return rc;
1576} 1931}
1577 1932
@@ -1594,7 +1949,7 @@ dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
1594 1949
1595 rc = 0; 1950 rc = 0;
1596 if (copy_to_user(argp, (long *) &attrib, 1951 if (copy_to_user(argp, (long *) &attrib,
1597 sizeof (struct attrib_data_t))) 1952 sizeof(struct attrib_data_t)))
1598 rc = -EFAULT; 1953 rc = -EFAULT;
1599 1954
1600 return rc; 1955 return rc;
@@ -1627,8 +1982,10 @@ dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
1627} 1982}
1628 1983
1629static int 1984static int
1630dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) 1985dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
1631{ 1986{
1987 struct dasd_device *device = block->base;
1988
1632 switch (cmd) { 1989 switch (cmd) {
1633 case BIODASDGATTR: 1990 case BIODASDGATTR:
1634 return dasd_eckd_get_attrib(device, argp); 1991 return dasd_eckd_get_attrib(device, argp);
@@ -1685,9 +2042,8 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1685 * Print sense data and related channel program. 2042 * Print sense data and related channel program.
1686 * Parts are printed because printk buffer is only 1024 bytes. 2043 * Parts are printed because printk buffer is only 1024 bytes.
1687 */ 2044 */
1688static void 2045static void dasd_eckd_dump_sense(struct dasd_device *device,
1689dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, 2046 struct dasd_ccw_req *req, struct irb *irb)
1690 struct irb *irb)
1691{ 2047{
1692 char *page; 2048 char *page;
1693 struct ccw1 *first, *last, *fail, *from, *to; 2049 struct ccw1 *first, *last, *fail, *from, *to;
@@ -1743,37 +2099,40 @@ dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1743 } 2099 }
1744 printk("%s", page); 2100 printk("%s", page);
1745 2101
1746 /* dump the Channel Program (max 140 Bytes per line) */ 2102 if (req) {
1747 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 2103 /* req == NULL for unsolicited interrupts */
1748 first = req->cpaddr; 2104 /* dump the Channel Program (max 140 Bytes per line) */
1749 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 2105 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
1750 to = min(first + 6, last); 2106 first = req->cpaddr;
1751 len = sprintf(page, KERN_ERR PRINTK_HEADER 2107 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1752 " Related CP in req: %p\n", req); 2108 to = min(first + 6, last);
1753 dasd_eckd_dump_ccw_range(first, to, page + len); 2109 len = sprintf(page, KERN_ERR PRINTK_HEADER
1754 printk("%s", page); 2110 " Related CP in req: %p\n", req);
2111 dasd_eckd_dump_ccw_range(first, to, page + len);
2112 printk("%s", page);
1755 2113
1756 /* print failing CCW area (maximum 4) */ 2114 /* print failing CCW area (maximum 4) */
1757 /* scsw->cda is either valid or zero */ 2115 /* scsw->cda is either valid or zero */
1758 len = 0; 2116 len = 0;
1759 from = ++to; 2117 from = ++to;
1760 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */ 2118 fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
1761 if (from < fail - 2) { 2119 if (from < fail - 2) {
1762 from = fail - 2; /* there is a gap - print header */ 2120 from = fail - 2; /* there is a gap - print header */
1763 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 2121 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
1764 } 2122 }
1765 to = min(fail + 1, last); 2123 to = min(fail + 1, last);
1766 len += dasd_eckd_dump_ccw_range(from, to, page + len); 2124 len += dasd_eckd_dump_ccw_range(from, to, page + len);
1767 2125
1768 /* print last CCWs (maximum 2) */ 2126 /* print last CCWs (maximum 2) */
1769 from = max(from, ++to); 2127 from = max(from, ++to);
1770 if (from < last - 1) { 2128 if (from < last - 1) {
1771 from = last - 1; /* there is a gap - print header */ 2129 from = last - 1; /* there is a gap - print header */
1772 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 2130 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
2131 }
2132 len += dasd_eckd_dump_ccw_range(from, last, page + len);
2133 if (len > 0)
2134 printk("%s", page);
1773 } 2135 }
1774 len += dasd_eckd_dump_ccw_range(from, last, page + len);
1775 if (len > 0)
1776 printk("%s", page);
1777 free_page((unsigned long) page); 2136 free_page((unsigned long) page);
1778} 2137}
1779 2138
@@ -1796,16 +2155,20 @@ static struct dasd_discipline dasd_eckd_discipline = {
1796 .ebcname = "ECKD", 2155 .ebcname = "ECKD",
1797 .max_blocks = 240, 2156 .max_blocks = 240,
1798 .check_device = dasd_eckd_check_characteristics, 2157 .check_device = dasd_eckd_check_characteristics,
2158 .uncheck_device = dasd_eckd_uncheck_device,
1799 .do_analysis = dasd_eckd_do_analysis, 2159 .do_analysis = dasd_eckd_do_analysis,
2160 .ready_to_online = dasd_eckd_ready_to_online,
2161 .online_to_ready = dasd_eckd_online_to_ready,
1800 .fill_geometry = dasd_eckd_fill_geometry, 2162 .fill_geometry = dasd_eckd_fill_geometry,
1801 .start_IO = dasd_start_IO, 2163 .start_IO = dasd_start_IO,
1802 .term_IO = dasd_term_IO, 2164 .term_IO = dasd_term_IO,
2165 .handle_terminated_request = dasd_eckd_handle_terminated_request,
1803 .format_device = dasd_eckd_format_device, 2166 .format_device = dasd_eckd_format_device,
1804 .examine_error = dasd_eckd_examine_error,
1805 .erp_action = dasd_eckd_erp_action, 2167 .erp_action = dasd_eckd_erp_action,
1806 .erp_postaction = dasd_eckd_erp_postaction, 2168 .erp_postaction = dasd_eckd_erp_postaction,
1807 .build_cp = dasd_eckd_build_cp, 2169 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
1808 .free_cp = dasd_eckd_free_cp, 2170 .build_cp = dasd_eckd_build_alias_cp,
2171 .free_cp = dasd_eckd_free_alias_cp,
1809 .dump_sense = dasd_eckd_dump_sense, 2172 .dump_sense = dasd_eckd_dump_sense,
1810 .fill_info = dasd_eckd_fill_info, 2173 .fill_info = dasd_eckd_fill_info,
1811 .ioctl = dasd_eckd_ioctl, 2174 .ioctl = dasd_eckd_ioctl,
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 712ff1650134..fc2509c939bc 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -39,6 +39,8 @@
39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e 39#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d 40#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
41#define DASD_ECKD_CCW_RESERVE 0xB4 41#define DASD_ECKD_CCW_RESERVE 0xB4
42#define DASD_ECKD_CCW_PFX 0xE7
43#define DASD_ECKD_CCW_RSCK 0xF9
42 44
43/* 45/*
44 * Perform Subsystem Function / Sub-Orders 46 * Perform Subsystem Function / Sub-Orders
@@ -137,6 +139,25 @@ struct LO_eckd_data {
137 __u16 length; 139 __u16 length;
138} __attribute__ ((packed)); 140} __attribute__ ((packed));
139 141
142/* Prefix data for format 0x00 and 0x01 */
143struct PFX_eckd_data {
144 unsigned char format;
145 struct {
146 unsigned char define_extend:1;
147 unsigned char time_stamp:1;
148 unsigned char verify_base:1;
149 unsigned char hyper_pav:1;
150 unsigned char reserved:4;
151 } __attribute__ ((packed)) validity;
152 __u8 base_address;
153 __u8 aux;
154 __u8 base_lss;
155 __u8 reserved[7];
156 struct DE_eckd_data define_extend;
157 struct LO_eckd_data locate_record;
158 __u8 LO_extended_data[4];
159} __attribute__ ((packed));
160
140struct dasd_eckd_characteristics { 161struct dasd_eckd_characteristics {
141 __u16 cu_type; 162 __u16 cu_type;
142 struct { 163 struct {
@@ -254,7 +275,9 @@ struct dasd_eckd_confdata {
254 } __attribute__ ((packed)) ned; 275 } __attribute__ ((packed)) ned;
255 struct { 276 struct {
256 unsigned char flags; /* byte 0 */ 277 unsigned char flags; /* byte 0 */
257 unsigned char res2[7]; /* byte 1- 7 */ 278 unsigned char res1; /* byte 1 */
279 __u16 format; /* byte 2-3 */
280 unsigned char res2[4]; /* byte 4-7 */
258 unsigned char sua_flags; /* byte 8 */ 281 unsigned char sua_flags; /* byte 8 */
259 __u8 base_unit_addr; /* byte 9 */ 282 __u8 base_unit_addr; /* byte 9 */
260 unsigned char res3[22]; /* byte 10-31 */ 283 unsigned char res3[22]; /* byte 10-31 */
@@ -343,6 +366,11 @@ struct dasd_eckd_path {
343 __u8 npm; 366 __u8 npm;
344}; 367};
345 368
369struct dasd_rssd_features {
370 char feature[256];
371} __attribute__((packed));
372
373
346/* 374/*
347 * Perform Subsystem Function - Prepare for Read Subsystem Data 375 * Perform Subsystem Function - Prepare for Read Subsystem Data
348 */ 376 */
@@ -365,4 +393,99 @@ struct dasd_psf_ssc_data {
365 unsigned char reserved[59]; 393 unsigned char reserved[59];
366} __attribute__((packed)); 394} __attribute__((packed));
367 395
396
397/*
398 * some structures and definitions for alias handling
399 */
400struct dasd_unit_address_configuration {
401 struct {
402 char ua_type;
403 char base_ua;
404 } unit[256];
405} __attribute__((packed));
406
407
408#define MAX_DEVICES_PER_LCU 256
409
410/* flags on the LCU */
411#define NEED_UAC_UPDATE 0x01
412#define UPDATE_PENDING 0x02
413
414enum pavtype {NO_PAV, BASE_PAV, HYPER_PAV};
415
416
417struct alias_root {
418 struct list_head serverlist;
419 spinlock_t lock;
420};
421
422struct alias_server {
423 struct list_head server;
424 struct dasd_uid uid;
425 struct list_head lculist;
426};
427
428struct summary_unit_check_work_data {
429 char reason;
430 struct dasd_device *device;
431 struct work_struct worker;
432};
433
434struct read_uac_work_data {
435 struct dasd_device *device;
436 struct delayed_work dwork;
437};
438
439struct alias_lcu {
440 struct list_head lcu;
441 struct dasd_uid uid;
442 enum pavtype pav;
443 char flags;
444 spinlock_t lock;
445 struct list_head grouplist;
446 struct list_head active_devices;
447 struct list_head inactive_devices;
448 struct dasd_unit_address_configuration *uac;
449 struct summary_unit_check_work_data suc_data;
450 struct read_uac_work_data ruac_data;
451 struct dasd_ccw_req *rsu_cqr;
452};
453
454struct alias_pav_group {
455 struct list_head group;
456 struct dasd_uid uid;
457 struct alias_lcu *lcu;
458 struct list_head baselist;
459 struct list_head aliaslist;
460 struct dasd_device *next;
461};
462
463
464struct dasd_eckd_private {
465 struct dasd_eckd_characteristics rdc_data;
466 struct dasd_eckd_confdata conf_data;
467 struct dasd_eckd_path path_data;
468 struct eckd_count count_area[5];
469 int init_cqr_status;
470 int uses_cdl;
471 struct attrib_data_t attrib; /* e.g. cache operations */
472 struct dasd_rssd_features features;
473
474 /* alias managemnet */
475 struct dasd_uid uid;
476 struct alias_pav_group *pavgroup;
477 struct alias_lcu *lcu;
478 int count;
479};
480
481
482
483int dasd_alias_make_device_known_to_lcu(struct dasd_device *);
484void dasd_alias_disconnect_device_from_lcu(struct dasd_device *);
485int dasd_alias_add_device(struct dasd_device *);
486int dasd_alias_remove_device(struct dasd_device *);
487struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *);
488void dasd_alias_handle_summary_unit_check(struct dasd_device *, struct irb *);
489void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *);
490
368#endif /* DASD_ECKD_H */ 491#endif /* DASD_ECKD_H */
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 0c081a664ee8..6e53ab606e97 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -336,7 +336,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
336 unsigned long flags; 336 unsigned long flags;
337 struct eerbuffer *eerb; 337 struct eerbuffer *eerb;
338 338
339 snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0; 339 snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
340 if (snss_rc) 340 if (snss_rc)
341 data_size = 0; 341 data_size = 0;
342 else 342 else
@@ -404,10 +404,11 @@ void dasd_eer_snss(struct dasd_device *device)
404 set_bit(DASD_FLAG_EER_SNSS, &device->flags); 404 set_bit(DASD_FLAG_EER_SNSS, &device->flags);
405 return; 405 return;
406 } 406 }
407 /* cdev is already locked, can't use dasd_add_request_head */
407 clear_bit(DASD_FLAG_EER_SNSS, &device->flags); 408 clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
408 cqr->status = DASD_CQR_QUEUED; 409 cqr->status = DASD_CQR_QUEUED;
409 list_add(&cqr->list, &device->ccw_queue); 410 list_add(&cqr->devlist, &device->ccw_queue);
410 dasd_schedule_bh(device); 411 dasd_schedule_device_bh(device);
411} 412}
412 413
413/* 414/*
@@ -415,7 +416,7 @@ void dasd_eer_snss(struct dasd_device *device)
415 */ 416 */
416static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data) 417static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
417{ 418{
418 struct dasd_device *device = cqr->device; 419 struct dasd_device *device = cqr->startdev;
419 unsigned long flags; 420 unsigned long flags;
420 421
421 dasd_eer_write(device, cqr, DASD_EER_STATECHANGE); 422 dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
@@ -458,7 +459,7 @@ int dasd_eer_enable(struct dasd_device *device)
458 if (!cqr) 459 if (!cqr)
459 return -ENOMEM; 460 return -ENOMEM;
460 461
461 cqr->device = device; 462 cqr->startdev = device;
462 cqr->retries = 255; 463 cqr->retries = 255;
463 cqr->expires = 10 * HZ; 464 cqr->expires = 10 * HZ;
464 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 465 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index caa5d91420f8..8f10000851a3 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -46,6 +46,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
46 if (cqr == NULL) 46 if (cqr == NULL)
47 return ERR_PTR(-ENOMEM); 47 return ERR_PTR(-ENOMEM);
48 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 48 memset(cqr, 0, sizeof(struct dasd_ccw_req));
49 INIT_LIST_HEAD(&cqr->devlist);
50 INIT_LIST_HEAD(&cqr->blocklist);
49 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 51 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
50 cqr->cpaddr = NULL; 52 cqr->cpaddr = NULL;
51 if (cplength > 0) { 53 if (cplength > 0) {
@@ -66,7 +68,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
66} 68}
67 69
68void 70void
69dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device) 71dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
70{ 72{
71 unsigned long flags; 73 unsigned long flags;
72 74
@@ -81,11 +83,11 @@ dasd_free_erp_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
81 * dasd_default_erp_action just retries the current cqr 83 * dasd_default_erp_action just retries the current cqr
82 */ 84 */
83struct dasd_ccw_req * 85struct dasd_ccw_req *
84dasd_default_erp_action(struct dasd_ccw_req * cqr) 86dasd_default_erp_action(struct dasd_ccw_req *cqr)
85{ 87{
86 struct dasd_device *device; 88 struct dasd_device *device;
87 89
88 device = cqr->device; 90 device = cqr->startdev;
89 91
90 /* just retry - there is nothing to save ... I got no sense data.... */ 92 /* just retry - there is nothing to save ... I got no sense data.... */
91 if (cqr->retries > 0) { 93 if (cqr->retries > 0) {
@@ -93,12 +95,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
93 "default ERP called (%i retries left)", 95 "default ERP called (%i retries left)",
94 cqr->retries); 96 cqr->retries);
95 cqr->lpm = LPM_ANYPATH; 97 cqr->lpm = LPM_ANYPATH;
96 cqr->status = DASD_CQR_QUEUED; 98 cqr->status = DASD_CQR_FILLED;
97 } else { 99 } else {
98 DEV_MESSAGE (KERN_WARNING, device, "%s", 100 DEV_MESSAGE (KERN_WARNING, device, "%s",
99 "default ERP called (NO retry left)"); 101 "default ERP called (NO retry left)");
100 cqr->status = DASD_CQR_FAILED; 102 cqr->status = DASD_CQR_FAILED;
101 cqr->stopclk = get_clock (); 103 cqr->stopclk = get_clock();
102 } 104 }
103 return cqr; 105 return cqr;
104} /* end dasd_default_erp_action */ 106} /* end dasd_default_erp_action */
@@ -117,15 +119,12 @@ dasd_default_erp_action(struct dasd_ccw_req * cqr)
117 * RETURN VALUES 119 * RETURN VALUES
118 * cqr pointer to the original CQR 120 * cqr pointer to the original CQR
119 */ 121 */
120struct dasd_ccw_req * 122struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
121dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
122{ 123{
123 struct dasd_device *device;
124 int success; 124 int success;
125 125
126 BUG_ON(cqr->refers == NULL || cqr->function == NULL); 126 BUG_ON(cqr->refers == NULL || cqr->function == NULL);
127 127
128 device = cqr->device;
129 success = cqr->status == DASD_CQR_DONE; 128 success = cqr->status == DASD_CQR_DONE;
130 129
131 /* free all ERPs - but NOT the original cqr */ 130 /* free all ERPs - but NOT the original cqr */
@@ -133,10 +132,10 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr)
133 struct dasd_ccw_req *refers; 132 struct dasd_ccw_req *refers;
134 133
135 refers = cqr->refers; 134 refers = cqr->refers;
136 /* remove the request from the device queue */ 135 /* remove the request from the block queue */
137 list_del(&cqr->list); 136 list_del(&cqr->blocklist);
138 /* free the finished erp request */ 137 /* free the finished erp request */
139 dasd_free_erp_request(cqr, device); 138 dasd_free_erp_request(cqr, cqr->memdev);
140 cqr = refers; 139 cqr = refers;
141 } 140 }
142 141
@@ -157,7 +156,7 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
157{ 156{
158 struct dasd_device *device; 157 struct dasd_device *device;
159 158
160 device = cqr->device; 159 device = cqr->startdev;
161 /* dump sense data */ 160 /* dump sense data */
162 if (device->discipline && device->discipline->dump_sense) 161 if (device->discipline && device->discipline->dump_sense)
163 device->discipline->dump_sense(device, cqr, irb); 162 device->discipline->dump_sense(device, cqr, irb);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 1d95822e0b8e..d13ea05089a7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -117,6 +117,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
117static int 117static int
118dasd_fba_check_characteristics(struct dasd_device *device) 118dasd_fba_check_characteristics(struct dasd_device *device)
119{ 119{
120 struct dasd_block *block;
120 struct dasd_fba_private *private; 121 struct dasd_fba_private *private;
121 struct ccw_device *cdev = device->cdev; 122 struct ccw_device *cdev = device->cdev;
122 void *rdc_data; 123 void *rdc_data;
@@ -133,6 +134,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
133 } 134 }
134 device->private = (void *) private; 135 device->private = (void *) private;
135 } 136 }
137 block = dasd_alloc_block();
138 if (IS_ERR(block)) {
139 DEV_MESSAGE(KERN_WARNING, device, "%s",
140 "could not allocate dasd block structure");
141 kfree(device->private);
142 return PTR_ERR(block);
143 }
144 device->block = block;
145 block->base = device;
146
136 /* Read Device Characteristics */ 147 /* Read Device Characteristics */
137 rdc_data = (void *) &(private->rdc_data); 148 rdc_data = (void *) &(private->rdc_data);
138 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32); 149 rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
@@ -155,60 +166,37 @@ dasd_fba_check_characteristics(struct dasd_device *device)
155 return 0; 166 return 0;
156} 167}
157 168
158static int 169static int dasd_fba_do_analysis(struct dasd_block *block)
159dasd_fba_do_analysis(struct dasd_device *device)
160{ 170{
161 struct dasd_fba_private *private; 171 struct dasd_fba_private *private;
162 int sb, rc; 172 int sb, rc;
163 173
164 private = (struct dasd_fba_private *) device->private; 174 private = (struct dasd_fba_private *) block->base->private;
165 rc = dasd_check_blocksize(private->rdc_data.blk_size); 175 rc = dasd_check_blocksize(private->rdc_data.blk_size);
166 if (rc) { 176 if (rc) {
167 DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d", 177 DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
168 private->rdc_data.blk_size); 178 private->rdc_data.blk_size);
169 return rc; 179 return rc;
170 } 180 }
171 device->blocks = private->rdc_data.blk_bdsa; 181 block->blocks = private->rdc_data.blk_bdsa;
172 device->bp_block = private->rdc_data.blk_size; 182 block->bp_block = private->rdc_data.blk_size;
173 device->s2b_shift = 0; /* bits to shift 512 to get a block */ 183 block->s2b_shift = 0; /* bits to shift 512 to get a block */
174 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1) 184 for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
175 device->s2b_shift++; 185 block->s2b_shift++;
176 return 0; 186 return 0;
177} 187}
178 188
179static int 189static int dasd_fba_fill_geometry(struct dasd_block *block,
180dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo) 190 struct hd_geometry *geo)
181{ 191{
182 if (dasd_check_blocksize(device->bp_block) != 0) 192 if (dasd_check_blocksize(block->bp_block) != 0)
183 return -EINVAL; 193 return -EINVAL;
184 geo->cylinders = (device->blocks << device->s2b_shift) >> 10; 194 geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
185 geo->heads = 16; 195 geo->heads = 16;
186 geo->sectors = 128 >> device->s2b_shift; 196 geo->sectors = 128 >> block->s2b_shift;
187 return 0; 197 return 0;
188} 198}
189 199
190static dasd_era_t
191dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
192{
193 struct dasd_device *device;
194 struct ccw_device *cdev;
195
196 device = (struct dasd_device *) cqr->device;
197 if (irb->scsw.cstat == 0x00 &&
198 irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
199 return dasd_era_none;
200
201 cdev = device->cdev;
202 switch (cdev->id.dev_type) {
203 case 0x3370:
204 return dasd_3370_erp_examine(cqr, irb);
205 case 0x9336:
206 return dasd_9336_erp_examine(cqr, irb);
207 default:
208 return dasd_era_recover;
209 }
210}
211
212static dasd_erp_fn_t 200static dasd_erp_fn_t
213dasd_fba_erp_action(struct dasd_ccw_req * cqr) 201dasd_fba_erp_action(struct dasd_ccw_req * cqr)
214{ 202{
@@ -221,13 +209,34 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
221 if (cqr->function == dasd_default_erp_action) 209 if (cqr->function == dasd_default_erp_action)
222 return dasd_default_erp_postaction; 210 return dasd_default_erp_postaction;
223 211
224 DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p", 212 DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
225 cqr->function); 213 cqr->function);
226 return NULL; 214 return NULL;
227} 215}
228 216
229static struct dasd_ccw_req * 217static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
230dasd_fba_build_cp(struct dasd_device * device, struct request *req) 218 struct irb *irb)
219{
220 char mask;
221
222 /* first of all check for state change pending interrupt */
223 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
224 if ((irb->scsw.dstat & mask) == mask) {
225 dasd_generic_handle_state_change(device);
226 return;
227 }
228
229 /* check for unsolicited interrupts */
230 DEV_MESSAGE(KERN_DEBUG, device, "%s",
231 "unsolicited interrupt received");
232 device->discipline->dump_sense(device, NULL, irb);
233 dasd_schedule_device_bh(device);
234 return;
235};
236
237static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
238 struct dasd_block *block,
239 struct request *req)
231{ 240{
232 struct dasd_fba_private *private; 241 struct dasd_fba_private *private;
233 unsigned long *idaws; 242 unsigned long *idaws;
@@ -242,17 +251,17 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
242 unsigned int blksize, off; 251 unsigned int blksize, off;
243 unsigned char cmd; 252 unsigned char cmd;
244 253
245 private = (struct dasd_fba_private *) device->private; 254 private = (struct dasd_fba_private *) block->base->private;
246 if (rq_data_dir(req) == READ) { 255 if (rq_data_dir(req) == READ) {
247 cmd = DASD_FBA_CCW_READ; 256 cmd = DASD_FBA_CCW_READ;
248 } else if (rq_data_dir(req) == WRITE) { 257 } else if (rq_data_dir(req) == WRITE) {
249 cmd = DASD_FBA_CCW_WRITE; 258 cmd = DASD_FBA_CCW_WRITE;
250 } else 259 } else
251 return ERR_PTR(-EINVAL); 260 return ERR_PTR(-EINVAL);
252 blksize = device->bp_block; 261 blksize = block->bp_block;
253 /* Calculate record id of first and last block. */ 262 /* Calculate record id of first and last block. */
254 first_rec = req->sector >> device->s2b_shift; 263 first_rec = req->sector >> block->s2b_shift;
255 last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift; 264 last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
256 /* Check struct bio and count the number of blocks for the request. */ 265 /* Check struct bio and count the number of blocks for the request. */
257 count = 0; 266 count = 0;
258 cidaw = 0; 267 cidaw = 0;
@@ -260,7 +269,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
260 if (bv->bv_len & (blksize - 1)) 269 if (bv->bv_len & (blksize - 1))
261 /* Fba can only do full blocks. */ 270 /* Fba can only do full blocks. */
262 return ERR_PTR(-EINVAL); 271 return ERR_PTR(-EINVAL);
263 count += bv->bv_len >> (device->s2b_shift + 9); 272 count += bv->bv_len >> (block->s2b_shift + 9);
264#if defined(CONFIG_64BIT) 273#if defined(CONFIG_64BIT)
265 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 274 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
266 cidaw += bv->bv_len / blksize; 275 cidaw += bv->bv_len / blksize;
@@ -284,13 +293,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
284 } 293 }
285 /* Allocate the ccw request. */ 294 /* Allocate the ccw request. */
286 cqr = dasd_smalloc_request(dasd_fba_discipline.name, 295 cqr = dasd_smalloc_request(dasd_fba_discipline.name,
287 cplength, datasize, device); 296 cplength, datasize, memdev);
288 if (IS_ERR(cqr)) 297 if (IS_ERR(cqr))
289 return cqr; 298 return cqr;
290 ccw = cqr->cpaddr; 299 ccw = cqr->cpaddr;
291 /* First ccw is define extent. */ 300 /* First ccw is define extent. */
292 define_extent(ccw++, cqr->data, rq_data_dir(req), 301 define_extent(ccw++, cqr->data, rq_data_dir(req),
293 device->bp_block, req->sector, req->nr_sectors); 302 block->bp_block, req->sector, req->nr_sectors);
294 /* Build locate_record + read/write ccws. */ 303 /* Build locate_record + read/write ccws. */
295 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 304 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
296 LO_data = (struct LO_fba_data *) (idaws + cidaw); 305 LO_data = (struct LO_fba_data *) (idaws + cidaw);
@@ -326,7 +335,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
326 ccw[-1].flags |= CCW_FLAG_CC; 335 ccw[-1].flags |= CCW_FLAG_CC;
327 } 336 }
328 ccw->cmd_code = cmd; 337 ccw->cmd_code = cmd;
329 ccw->count = device->bp_block; 338 ccw->count = block->bp_block;
330 if (idal_is_needed(dst, blksize)) { 339 if (idal_is_needed(dst, blksize)) {
331 ccw->cda = (__u32)(addr_t) idaws; 340 ccw->cda = (__u32)(addr_t) idaws;
332 ccw->flags = CCW_FLAG_IDA; 341 ccw->flags = CCW_FLAG_IDA;
@@ -342,7 +351,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
342 } 351 }
343 if (req->cmd_flags & REQ_FAILFAST) 352 if (req->cmd_flags & REQ_FAILFAST)
344 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 353 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
345 cqr->device = device; 354 cqr->startdev = memdev;
355 cqr->memdev = memdev;
356 cqr->block = block;
346 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 357 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
347 cqr->retries = 32; 358 cqr->retries = 32;
348 cqr->buildclk = get_clock(); 359 cqr->buildclk = get_clock();
@@ -363,8 +374,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
363 374
364 if (!dasd_page_cache) 375 if (!dasd_page_cache)
365 goto out; 376 goto out;
366 private = (struct dasd_fba_private *) cqr->device->private; 377 private = (struct dasd_fba_private *) cqr->block->base->private;
367 blksize = cqr->device->bp_block; 378 blksize = cqr->block->bp_block;
368 ccw = cqr->cpaddr; 379 ccw = cqr->cpaddr;
369 /* Skip over define extent & locate record. */ 380 /* Skip over define extent & locate record. */
370 ccw++; 381 ccw++;
@@ -394,10 +405,15 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
394 } 405 }
395out: 406out:
396 status = cqr->status == DASD_CQR_DONE; 407 status = cqr->status == DASD_CQR_DONE;
397 dasd_sfree_request(cqr, cqr->device); 408 dasd_sfree_request(cqr, cqr->memdev);
398 return status; 409 return status;
399} 410}
400 411
412static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
413{
414 cqr->status = DASD_CQR_FILLED;
415};
416
401static int 417static int
402dasd_fba_fill_info(struct dasd_device * device, 418dasd_fba_fill_info(struct dasd_device * device,
403 struct dasd_information2_t * info) 419 struct dasd_information2_t * info)
@@ -546,9 +562,10 @@ static struct dasd_discipline dasd_fba_discipline = {
546 .fill_geometry = dasd_fba_fill_geometry, 562 .fill_geometry = dasd_fba_fill_geometry,
547 .start_IO = dasd_start_IO, 563 .start_IO = dasd_start_IO,
548 .term_IO = dasd_term_IO, 564 .term_IO = dasd_term_IO,
549 .examine_error = dasd_fba_examine_error, 565 .handle_terminated_request = dasd_fba_handle_terminated_request,
550 .erp_action = dasd_fba_erp_action, 566 .erp_action = dasd_fba_erp_action,
551 .erp_postaction = dasd_fba_erp_postaction, 567 .erp_postaction = dasd_fba_erp_postaction,
568 .handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
552 .build_cp = dasd_fba_build_cp, 569 .build_cp = dasd_fba_build_cp,
553 .free_cp = dasd_fba_free_cp, 570 .free_cp = dasd_fba_free_cp,
554 .dump_sense = dasd_fba_dump_sense, 571 .dump_sense = dasd_fba_dump_sense,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 47ba4462708d..aee6565aaf98 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -25,14 +25,15 @@
25/* 25/*
26 * Allocate and register gendisk structure for device. 26 * Allocate and register gendisk structure for device.
27 */ 27 */
28int 28int dasd_gendisk_alloc(struct dasd_block *block)
29dasd_gendisk_alloc(struct dasd_device *device)
30{ 29{
31 struct gendisk *gdp; 30 struct gendisk *gdp;
31 struct dasd_device *base;
32 int len; 32 int len;
33 33
34 /* Make sure the minor for this device exists. */ 34 /* Make sure the minor for this device exists. */
35 if (device->devindex >= DASD_PER_MAJOR) 35 base = block->base;
36 if (base->devindex >= DASD_PER_MAJOR)
36 return -EBUSY; 37 return -EBUSY;
37 38
38 gdp = alloc_disk(1 << DASD_PARTN_BITS); 39 gdp = alloc_disk(1 << DASD_PARTN_BITS);
@@ -41,9 +42,9 @@ dasd_gendisk_alloc(struct dasd_device *device)
41 42
42 /* Initialize gendisk structure. */ 43 /* Initialize gendisk structure. */
43 gdp->major = DASD_MAJOR; 44 gdp->major = DASD_MAJOR;
44 gdp->first_minor = device->devindex << DASD_PARTN_BITS; 45 gdp->first_minor = base->devindex << DASD_PARTN_BITS;
45 gdp->fops = &dasd_device_operations; 46 gdp->fops = &dasd_device_operations;
46 gdp->driverfs_dev = &device->cdev->dev; 47 gdp->driverfs_dev = &base->cdev->dev;
47 48
48 /* 49 /*
49 * Set device name. 50 * Set device name.
@@ -53,53 +54,51 @@ dasd_gendisk_alloc(struct dasd_device *device)
53 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252 54 * dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
54 */ 55 */
55 len = sprintf(gdp->disk_name, "dasd"); 56 len = sprintf(gdp->disk_name, "dasd");
56 if (device->devindex > 25) { 57 if (base->devindex > 25) {
57 if (device->devindex > 701) { 58 if (base->devindex > 701) {
58 if (device->devindex > 18277) 59 if (base->devindex > 18277)
59 len += sprintf(gdp->disk_name + len, "%c", 60 len += sprintf(gdp->disk_name + len, "%c",
60 'a'+(((device->devindex-18278) 61 'a'+(((base->devindex-18278)
61 /17576)%26)); 62 /17576)%26));
62 len += sprintf(gdp->disk_name + len, "%c", 63 len += sprintf(gdp->disk_name + len, "%c",
63 'a'+(((device->devindex-702)/676)%26)); 64 'a'+(((base->devindex-702)/676)%26));
64 } 65 }
65 len += sprintf(gdp->disk_name + len, "%c", 66 len += sprintf(gdp->disk_name + len, "%c",
66 'a'+(((device->devindex-26)/26)%26)); 67 'a'+(((base->devindex-26)/26)%26));
67 } 68 }
68 len += sprintf(gdp->disk_name + len, "%c", 'a'+(device->devindex%26)); 69 len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
69 70
70 if (device->features & DASD_FEATURE_READONLY) 71 if (block->base->features & DASD_FEATURE_READONLY)
71 set_disk_ro(gdp, 1); 72 set_disk_ro(gdp, 1);
72 gdp->private_data = device; 73 gdp->private_data = block;
73 gdp->queue = device->request_queue; 74 gdp->queue = block->request_queue;
74 device->gdp = gdp; 75 block->gdp = gdp;
75 set_capacity(device->gdp, 0); 76 set_capacity(block->gdp, 0);
76 add_disk(device->gdp); 77 add_disk(block->gdp);
77 return 0; 78 return 0;
78} 79}
79 80
80/* 81/*
81 * Unregister and free gendisk structure for device. 82 * Unregister and free gendisk structure for device.
82 */ 83 */
83void 84void dasd_gendisk_free(struct dasd_block *block)
84dasd_gendisk_free(struct dasd_device *device)
85{ 85{
86 if (device->gdp) { 86 if (block->gdp) {
87 del_gendisk(device->gdp); 87 del_gendisk(block->gdp);
88 device->gdp->queue = NULL; 88 block->gdp->queue = NULL;
89 put_disk(device->gdp); 89 put_disk(block->gdp);
90 device->gdp = NULL; 90 block->gdp = NULL;
91 } 91 }
92} 92}
93 93
94/* 94/*
95 * Trigger a partition detection. 95 * Trigger a partition detection.
96 */ 96 */
97int 97int dasd_scan_partitions(struct dasd_block *block)
98dasd_scan_partitions(struct dasd_device * device)
99{ 98{
100 struct block_device *bdev; 99 struct block_device *bdev;
101 100
102 bdev = bdget_disk(device->gdp, 0); 101 bdev = bdget_disk(block->gdp, 0);
103 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0) 102 if (!bdev || blkdev_get(bdev, FMODE_READ, 1) < 0)
104 return -ENODEV; 103 return -ENODEV;
105 /* 104 /*
@@ -117,7 +116,7 @@ dasd_scan_partitions(struct dasd_device * device)
117 * is why the assignment to device->bdev is done AFTER 116 * is why the assignment to device->bdev is done AFTER
118 * the BLKRRPART ioctl. 117 * the BLKRRPART ioctl.
119 */ 118 */
120 device->bdev = bdev; 119 block->bdev = bdev;
121 return 0; 120 return 0;
122} 121}
123 122
@@ -125,8 +124,7 @@ dasd_scan_partitions(struct dasd_device * device)
125 * Remove all inodes in the system for a device, delete the 124 * Remove all inodes in the system for a device, delete the
126 * partitions and make device unusable by setting its size to zero. 125 * partitions and make device unusable by setting its size to zero.
127 */ 126 */
128void 127void dasd_destroy_partitions(struct dasd_block *block)
129dasd_destroy_partitions(struct dasd_device * device)
130{ 128{
131 /* The two structs have 168/176 byte on 31/64 bit. */ 129 /* The two structs have 168/176 byte on 31/64 bit. */
132 struct blkpg_partition bpart; 130 struct blkpg_partition bpart;
@@ -137,8 +135,8 @@ dasd_destroy_partitions(struct dasd_device * device)
137 * Get the bdev pointer from the device structure and clear 135 * Get the bdev pointer from the device structure and clear
138 * device->bdev to lower the offline open_count limit again. 136 * device->bdev to lower the offline open_count limit again.
139 */ 137 */
140 bdev = device->bdev; 138 bdev = block->bdev;
141 device->bdev = NULL; 139 block->bdev = NULL;
142 140
143 /* 141 /*
144 * See fs/partition/check.c:delete_partition 142 * See fs/partition/check.c:delete_partition
@@ -149,17 +147,16 @@ dasd_destroy_partitions(struct dasd_device * device)
149 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); 147 memset(&barg, 0, sizeof(struct blkpg_ioctl_arg));
150 barg.data = (void __force __user *) &bpart; 148 barg.data = (void __force __user *) &bpart;
151 barg.op = BLKPG_DEL_PARTITION; 149 barg.op = BLKPG_DEL_PARTITION;
152 for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) 150 for (bpart.pno = block->gdp->minors - 1; bpart.pno > 0; bpart.pno--)
153 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); 151 ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
154 152
155 invalidate_partition(device->gdp, 0); 153 invalidate_partition(block->gdp, 0);
156 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */ 154 /* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
157 blkdev_put(bdev); 155 blkdev_put(bdev);
158 set_capacity(device->gdp, 0); 156 set_capacity(block->gdp, 0);
159} 157}
160 158
161int 159int dasd_gendisk_init(void)
162dasd_gendisk_init(void)
163{ 160{
164 int rc; 161 int rc;
165 162
@@ -174,8 +171,7 @@ dasd_gendisk_init(void)
174 return 0; 171 return 0;
175} 172}
176 173
177void 174void dasd_gendisk_exit(void)
178dasd_gendisk_exit(void)
179{ 175{
180 unregister_blkdev(DASD_MAJOR, "dasd"); 176 unregister_blkdev(DASD_MAJOR, "dasd");
181} 177}
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index d427daeef511..44b2984dfbee 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -64,13 +64,7 @@
64 * SECTION: Type definitions 64 * SECTION: Type definitions
65 */ 65 */
66struct dasd_device; 66struct dasd_device;
67 67struct dasd_block;
68typedef enum {
69 dasd_era_fatal = -1, /* no chance to recover */
70 dasd_era_none = 0, /* don't recover, everything alright */
71 dasd_era_msg = 1, /* don't recover, just report... */
72 dasd_era_recover = 2 /* recovery action recommended */
73} dasd_era_t;
74 68
75/* BIT DEFINITIONS FOR SENSE DATA */ 69/* BIT DEFINITIONS FOR SENSE DATA */
76#define DASD_SENSE_BIT_0 0x80 70#define DASD_SENSE_BIT_0 0x80
@@ -151,19 +145,22 @@ do { \
151 145
152struct dasd_ccw_req { 146struct dasd_ccw_req {
153 unsigned int magic; /* Eye catcher */ 147 unsigned int magic; /* Eye catcher */
154 struct list_head list; /* list_head for request queueing. */ 148 struct list_head devlist; /* for dasd_device request queue */
149 struct list_head blocklist; /* for dasd_block request queue */
155 150
156 /* Where to execute what... */ 151 /* Where to execute what... */
157 struct dasd_device *device; /* device the request is for */ 152 struct dasd_block *block; /* the originating block device */
153 struct dasd_device *memdev; /* the device used to allocate this */
154 struct dasd_device *startdev; /* device the request is started on */
158 struct ccw1 *cpaddr; /* address of channel program */ 155 struct ccw1 *cpaddr; /* address of channel program */
159 char status; /* status of this request */ 156 char status; /* status of this request */
160 short retries; /* A retry counter */ 157 short retries; /* A retry counter */
161 unsigned long flags; /* flags of this request */ 158 unsigned long flags; /* flags of this request */
162 159
163 /* ... and how */ 160 /* ... and how */
164 unsigned long starttime; /* jiffies time of request start */ 161 unsigned long starttime; /* jiffies time of request start */
165 int expires; /* expiration period in jiffies */ 162 int expires; /* expiration period in jiffies */
166 char lpm; /* logical path mask */ 163 char lpm; /* logical path mask */
167 void *data; /* pointer to data area */ 164 void *data; /* pointer to data area */
168 165
169 /* these are important for recovering erroneous requests */ 166 /* these are important for recovering erroneous requests */
@@ -178,20 +175,27 @@ struct dasd_ccw_req {
178 unsigned long long endclk; /* TOD-clock of request termination */ 175 unsigned long long endclk; /* TOD-clock of request termination */
179 176
180 /* Callback that is called after reaching final status. */ 177 /* Callback that is called after reaching final status. */
181 void (*callback)(struct dasd_ccw_req *, void *data); 178 void (*callback)(struct dasd_ccw_req *, void *data);
182 void *callback_data; 179 void *callback_data;
183}; 180};
184 181
185/* 182/*
186 * dasd_ccw_req -> status can be: 183 * dasd_ccw_req -> status can be:
187 */ 184 */
188#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */ 185#define DASD_CQR_FILLED 0x00 /* request is ready to be processed */
189#define DASD_CQR_QUEUED 0x01 /* request is queued to be processed */ 186#define DASD_CQR_DONE 0x01 /* request is completed successfully */
190#define DASD_CQR_IN_IO 0x02 /* request is currently in IO */ 187#define DASD_CQR_NEED_ERP 0x02 /* request needs recovery action */
191#define DASD_CQR_DONE 0x03 /* request is completed successfully */ 188#define DASD_CQR_IN_ERP 0x03 /* request is in recovery */
192#define DASD_CQR_ERROR 0x04 /* request is completed with error */ 189#define DASD_CQR_FAILED 0x04 /* request is finally failed */
193#define DASD_CQR_FAILED 0x05 /* request is finally failed */ 190#define DASD_CQR_TERMINATED 0x05 /* request was stopped by driver */
194#define DASD_CQR_CLEAR 0x06 /* request is clear pending */ 191
192#define DASD_CQR_QUEUED 0x80 /* request is queued to be processed */
193#define DASD_CQR_IN_IO 0x81 /* request is currently in IO */
194#define DASD_CQR_ERROR 0x82 /* request is completed with error */
195#define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */
196#define DASD_CQR_CLEARED 0x84 /* request was cleared */
197#define DASD_CQR_SUCCESS 0x85 /* request was successfull */
198
195 199
196/* per dasd_ccw_req flags */ 200/* per dasd_ccw_req flags */
197#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */ 201#define DASD_CQR_FLAGS_USE_ERP 0 /* use ERP for this request */
@@ -214,52 +218,71 @@ struct dasd_discipline {
214 218
215 struct list_head list; /* used for list of disciplines */ 219 struct list_head list; /* used for list of disciplines */
216 220
217 /* 221 /*
218 * Device recognition functions. check_device is used to verify 222 * Device recognition functions. check_device is used to verify
219 * the sense data and the information returned by read device 223 * the sense data and the information returned by read device
220 * characteristics. It returns 0 if the discipline can be used 224 * characteristics. It returns 0 if the discipline can be used
221 * for the device in question. 225 * for the device in question. uncheck_device is called during
222 * do_analysis is used in the step from device state "basic" to 226 * device shutdown to deregister a device from its discipline.
223 * state "accept". It returns 0 if the device can be made ready, 227 */
224 * it returns -EMEDIUMTYPE if the device can't be made ready or 228 int (*check_device) (struct dasd_device *);
225 * -EAGAIN if do_analysis started a ccw that needs to complete 229 void (*uncheck_device) (struct dasd_device *);
226 * before the analysis may be repeated. 230
227 */ 231 /*
228 int (*check_device)(struct dasd_device *); 232 * do_analysis is used in the step from device state "basic" to
229 int (*do_analysis) (struct dasd_device *); 233 * state "accept". It returns 0 if the device can be made ready,
230 234 * it returns -EMEDIUMTYPE if the device can't be made ready or
231 /* 235 * -EAGAIN if do_analysis started a ccw that needs to complete
232 * Device operation functions. build_cp creates a ccw chain for 236 * before the analysis may be repeated.
233 * a block device request, start_io starts the request and 237 */
234 * term_IO cancels it (e.g. in case of a timeout). format_device 238 int (*do_analysis) (struct dasd_block *);
235 * returns a ccw chain to be used to format the device. 239
236 */ 240 /*
241 * Last things to do when a device is set online, and first things
242 * when it is set offline.
243 */
244 int (*ready_to_online) (struct dasd_device *);
245 int (*online_to_ready) (struct dasd_device *);
246
247 /*
248 * Device operation functions. build_cp creates a ccw chain for
249 * a block device request, start_io starts the request and
250 * term_IO cancels it (e.g. in case of a timeout). format_device
251 * returns a ccw chain to be used to format the device.
252 * handle_terminated_request allows to examine a cqr and prepare
253 * it for retry.
254 */
237 struct dasd_ccw_req *(*build_cp) (struct dasd_device *, 255 struct dasd_ccw_req *(*build_cp) (struct dasd_device *,
256 struct dasd_block *,
238 struct request *); 257 struct request *);
239 int (*start_IO) (struct dasd_ccw_req *); 258 int (*start_IO) (struct dasd_ccw_req *);
240 int (*term_IO) (struct dasd_ccw_req *); 259 int (*term_IO) (struct dasd_ccw_req *);
260 void (*handle_terminated_request) (struct dasd_ccw_req *);
241 struct dasd_ccw_req *(*format_device) (struct dasd_device *, 261 struct dasd_ccw_req *(*format_device) (struct dasd_device *,
242 struct format_data_t *); 262 struct format_data_t *);
243 int (*free_cp) (struct dasd_ccw_req *, struct request *); 263 int (*free_cp) (struct dasd_ccw_req *, struct request *);
244 /* 264
245 * Error recovery functions. examine_error() returns a value that 265 /*
246 * indicates what to do for an error condition. If examine_error() 266 * Error recovery functions. examine_error() returns a value that
267 * indicates what to do for an error condition. If examine_error()
247 * returns 'dasd_era_recover' erp_action() is called to create a 268 * returns 'dasd_era_recover' erp_action() is called to create a
248 * special error recovery ccw. erp_postaction() is called after 269 * special error recovery ccw. erp_postaction() is called after
249 * an error recovery ccw has finished its execution. dump_sense 270 * an error recovery ccw has finished its execution. dump_sense
250 * is called for every error condition to print the sense data 271 * is called for every error condition to print the sense data
251 * to the console. 272 * to the console.
252 */ 273 */
253 dasd_era_t(*examine_error) (struct dasd_ccw_req *, struct irb *);
254 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *); 274 dasd_erp_fn_t(*erp_action) (struct dasd_ccw_req *);
255 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *); 275 dasd_erp_fn_t(*erp_postaction) (struct dasd_ccw_req *);
256 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *, 276 void (*dump_sense) (struct dasd_device *, struct dasd_ccw_req *,
257 struct irb *); 277 struct irb *);
258 278
279 void (*handle_unsolicited_interrupt) (struct dasd_device *,
280 struct irb *);
281
259 /* i/o control functions. */ 282 /* i/o control functions. */
260 int (*fill_geometry) (struct dasd_device *, struct hd_geometry *); 283 int (*fill_geometry) (struct dasd_block *, struct hd_geometry *);
261 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); 284 int (*fill_info) (struct dasd_device *, struct dasd_information2_t *);
262 int (*ioctl) (struct dasd_device *, unsigned int, void __user *); 285 int (*ioctl) (struct dasd_block *, unsigned int, void __user *);
263}; 286};
264 287
265extern struct dasd_discipline *dasd_diag_discipline_pointer; 288extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -267,12 +290,18 @@ extern struct dasd_discipline *dasd_diag_discipline_pointer;
267/* 290/*
268 * Unique identifier for dasd device. 291 * Unique identifier for dasd device.
269 */ 292 */
293#define UA_NOT_CONFIGURED 0x00
294#define UA_BASE_DEVICE 0x01
295#define UA_BASE_PAV_ALIAS 0x02
296#define UA_HYPER_PAV_ALIAS 0x03
297
270struct dasd_uid { 298struct dasd_uid {
271 __u8 alias; 299 __u8 type;
272 char vendor[4]; 300 char vendor[4];
273 char serial[15]; 301 char serial[15];
274 __u16 ssid; 302 __u16 ssid;
275 __u8 unit_addr; 303 __u8 real_unit_addr;
304 __u8 base_unit_addr;
276}; 305};
277 306
278/* 307/*
@@ -293,14 +322,9 @@ struct dasd_uid {
293 322
294struct dasd_device { 323struct dasd_device {
295 /* Block device stuff. */ 324 /* Block device stuff. */
296 struct gendisk *gdp; 325 struct dasd_block *block;
297 struct request_queue *request_queue; 326
298 spinlock_t request_queue_lock;
299 struct block_device *bdev;
300 unsigned int devindex; 327 unsigned int devindex;
301 unsigned long blocks; /* size of volume in blocks */
302 unsigned int bp_block; /* bytes per block */
303 unsigned int s2b_shift; /* log2 (bp_block/512) */
304 unsigned long flags; /* per device flags */ 328 unsigned long flags; /* per device flags */
305 unsigned short features; /* copy of devmap-features (read-only!) */ 329 unsigned short features; /* copy of devmap-features (read-only!) */
306 330
@@ -316,9 +340,8 @@ struct dasd_device {
316 int state, target; 340 int state, target;
317 int stopped; /* device (ccw_device_start) was stopped */ 341 int stopped; /* device (ccw_device_start) was stopped */
318 342
319 /* Open and reference count. */ 343 /* reference count. */
320 atomic_t ref_count; 344 atomic_t ref_count;
321 atomic_t open_count;
322 345
323 /* ccw queue and memory for static ccw/erp buffers. */ 346 /* ccw queue and memory for static ccw/erp buffers. */
324 struct list_head ccw_queue; 347 struct list_head ccw_queue;
@@ -337,20 +360,45 @@ struct dasd_device {
337 360
338 struct ccw_device *cdev; 361 struct ccw_device *cdev;
339 362
363 /* hook for alias management */
364 struct list_head alias_list;
365};
366
367struct dasd_block {
368 /* Block device stuff. */
369 struct gendisk *gdp;
370 struct request_queue *request_queue;
371 spinlock_t request_queue_lock;
372 struct block_device *bdev;
373 atomic_t open_count;
374
375 unsigned long blocks; /* size of volume in blocks */
376 unsigned int bp_block; /* bytes per block */
377 unsigned int s2b_shift; /* log2 (bp_block/512) */
378
379 struct dasd_device *base;
380 struct list_head ccw_queue;
381 spinlock_t queue_lock;
382
383 atomic_t tasklet_scheduled;
384 struct tasklet_struct tasklet;
385 struct timer_list timer;
386
340#ifdef CONFIG_DASD_PROFILE 387#ifdef CONFIG_DASD_PROFILE
341 struct dasd_profile_info_t profile; 388 struct dasd_profile_info_t profile;
342#endif 389#endif
343}; 390};
344 391
392
393
345/* reasons why device (ccw_device_start) was stopped */ 394/* reasons why device (ccw_device_start) was stopped */
346#define DASD_STOPPED_NOT_ACC 1 /* not accessible */ 395#define DASD_STOPPED_NOT_ACC 1 /* not accessible */
347#define DASD_STOPPED_QUIESCE 2 /* Quiesced */ 396#define DASD_STOPPED_QUIESCE 2 /* Quiesced */
348#define DASD_STOPPED_PENDING 4 /* long busy */ 397#define DASD_STOPPED_PENDING 4 /* long busy */
349#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ 398#define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */
350#define DASD_STOPPED_DC_EIO 16 /* disconnected, return -EIO */ 399#define DASD_STOPPED_SU 16 /* summary unit check handling */
351 400
352/* per device flags */ 401/* per device flags */
353#define DASD_FLAG_DSC_ERROR 2 /* return -EIO when disconnected */
354#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ 402#define DASD_FLAG_OFFLINE 3 /* device is in offline processing */
355#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */ 403#define DASD_FLAG_EER_SNSS 4 /* A SNSS is required */
356#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */ 404#define DASD_FLAG_EER_IN_USE 5 /* A SNSS request is running */
@@ -489,6 +537,9 @@ dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
489struct dasd_device *dasd_alloc_device(void); 537struct dasd_device *dasd_alloc_device(void);
490void dasd_free_device(struct dasd_device *); 538void dasd_free_device(struct dasd_device *);
491 539
540struct dasd_block *dasd_alloc_block(void);
541void dasd_free_block(struct dasd_block *);
542
492void dasd_enable_device(struct dasd_device *); 543void dasd_enable_device(struct dasd_device *);
493void dasd_set_target_state(struct dasd_device *, int); 544void dasd_set_target_state(struct dasd_device *, int);
494void dasd_kick_device(struct dasd_device *); 545void dasd_kick_device(struct dasd_device *);
@@ -497,18 +548,23 @@ void dasd_add_request_head(struct dasd_ccw_req *);
497void dasd_add_request_tail(struct dasd_ccw_req *); 548void dasd_add_request_tail(struct dasd_ccw_req *);
498int dasd_start_IO(struct dasd_ccw_req *); 549int dasd_start_IO(struct dasd_ccw_req *);
499int dasd_term_IO(struct dasd_ccw_req *); 550int dasd_term_IO(struct dasd_ccw_req *);
500void dasd_schedule_bh(struct dasd_device *); 551void dasd_schedule_device_bh(struct dasd_device *);
552void dasd_schedule_block_bh(struct dasd_block *);
501int dasd_sleep_on(struct dasd_ccw_req *); 553int dasd_sleep_on(struct dasd_ccw_req *);
502int dasd_sleep_on_immediatly(struct dasd_ccw_req *); 554int dasd_sleep_on_immediatly(struct dasd_ccw_req *);
503int dasd_sleep_on_interruptible(struct dasd_ccw_req *); 555int dasd_sleep_on_interruptible(struct dasd_ccw_req *);
504void dasd_set_timer(struct dasd_device *, int); 556void dasd_device_set_timer(struct dasd_device *, int);
505void dasd_clear_timer(struct dasd_device *); 557void dasd_device_clear_timer(struct dasd_device *);
558void dasd_block_set_timer(struct dasd_block *, int);
559void dasd_block_clear_timer(struct dasd_block *);
506int dasd_cancel_req(struct dasd_ccw_req *); 560int dasd_cancel_req(struct dasd_ccw_req *);
561int dasd_flush_device_queue(struct dasd_device *);
507int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *); 562int dasd_generic_probe (struct ccw_device *, struct dasd_discipline *);
508void dasd_generic_remove (struct ccw_device *cdev); 563void dasd_generic_remove (struct ccw_device *cdev);
509int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); 564int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
510int dasd_generic_set_offline (struct ccw_device *cdev); 565int dasd_generic_set_offline (struct ccw_device *cdev);
511int dasd_generic_notify(struct ccw_device *, int); 566int dasd_generic_notify(struct ccw_device *, int);
567void dasd_generic_handle_state_change(struct dasd_device *);
512 568
513int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int); 569int dasd_generic_read_dev_chars(struct dasd_device *, char *, void **, int);
514 570
@@ -542,10 +598,10 @@ int dasd_busid_known(char *);
542/* externals in dasd_gendisk.c */ 598/* externals in dasd_gendisk.c */
543int dasd_gendisk_init(void); 599int dasd_gendisk_init(void);
544void dasd_gendisk_exit(void); 600void dasd_gendisk_exit(void);
545int dasd_gendisk_alloc(struct dasd_device *); 601int dasd_gendisk_alloc(struct dasd_block *);
546void dasd_gendisk_free(struct dasd_device *); 602void dasd_gendisk_free(struct dasd_block *);
547int dasd_scan_partitions(struct dasd_device *); 603int dasd_scan_partitions(struct dasd_block *);
548void dasd_destroy_partitions(struct dasd_device *); 604void dasd_destroy_partitions(struct dasd_block *);
549 605
550/* externals in dasd_ioctl.c */ 606/* externals in dasd_ioctl.c */
551int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); 607int dasd_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
@@ -563,20 +619,9 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
563void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); 619void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
564void dasd_log_sense(struct dasd_ccw_req *, struct irb *); 620void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
565 621
566/* externals in dasd_3370_erp.c */
567dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *);
568
569/* externals in dasd_3990_erp.c */ 622/* externals in dasd_3990_erp.c */
570dasd_era_t dasd_3990_erp_examine(struct dasd_ccw_req *, struct irb *);
571struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *); 623struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
572 624
573/* externals in dasd_9336_erp.c */
574dasd_era_t dasd_9336_erp_examine(struct dasd_ccw_req *, struct irb *);
575
576/* externals in dasd_9336_erp.c */
577dasd_era_t dasd_9343_erp_examine(struct dasd_ccw_req *, struct irb *);
578struct dasd_ccw_req *dasd_9343_erp_action(struct dasd_ccw_req *);
579
580/* externals in dasd_eer.c */ 625/* externals in dasd_eer.c */
581#ifdef CONFIG_DASD_EER 626#ifdef CONFIG_DASD_EER
582int dasd_eer_init(void); 627int dasd_eer_init(void);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 672eb0a3dd0b..91a64630cb0f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -38,15 +38,15 @@ dasd_ioctl_api_version(void __user *argp)
38static int 38static int
39dasd_ioctl_enable(struct block_device *bdev) 39dasd_ioctl_enable(struct block_device *bdev)
40{ 40{
41 struct dasd_device *device = bdev->bd_disk->private_data; 41 struct dasd_block *block = bdev->bd_disk->private_data;
42 42
43 if (!capable(CAP_SYS_ADMIN)) 43 if (!capable(CAP_SYS_ADMIN))
44 return -EACCES; 44 return -EACCES;
45 45
46 dasd_enable_device(device); 46 dasd_enable_device(block->base);
47 /* Formatting the dasd device can change the capacity. */ 47 /* Formatting the dasd device can change the capacity. */
48 mutex_lock(&bdev->bd_mutex); 48 mutex_lock(&bdev->bd_mutex);
49 i_size_write(bdev->bd_inode, (loff_t)get_capacity(device->gdp) << 9); 49 i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9);
50 mutex_unlock(&bdev->bd_mutex); 50 mutex_unlock(&bdev->bd_mutex);
51 return 0; 51 return 0;
52} 52}
@@ -58,7 +58,7 @@ dasd_ioctl_enable(struct block_device *bdev)
58static int 58static int
59dasd_ioctl_disable(struct block_device *bdev) 59dasd_ioctl_disable(struct block_device *bdev)
60{ 60{
61 struct dasd_device *device = bdev->bd_disk->private_data; 61 struct dasd_block *block = bdev->bd_disk->private_data;
62 62
63 if (!capable(CAP_SYS_ADMIN)) 63 if (!capable(CAP_SYS_ADMIN))
64 return -EACCES; 64 return -EACCES;
@@ -71,7 +71,7 @@ dasd_ioctl_disable(struct block_device *bdev)
71 * using the BIODASDFMT ioctl. Therefore the correct state for the 71 * using the BIODASDFMT ioctl. Therefore the correct state for the
72 * device is DASD_STATE_BASIC that allows to do basic i/o. 72 * device is DASD_STATE_BASIC that allows to do basic i/o.
73 */ 73 */
74 dasd_set_target_state(device, DASD_STATE_BASIC); 74 dasd_set_target_state(block->base, DASD_STATE_BASIC);
75 /* 75 /*
76 * Set i_size to zero, since read, write, etc. check against this 76 * Set i_size to zero, since read, write, etc. check against this
77 * value. 77 * value.
@@ -85,19 +85,19 @@ dasd_ioctl_disable(struct block_device *bdev)
85/* 85/*
86 * Quiesce device. 86 * Quiesce device.
87 */ 87 */
88static int 88static int dasd_ioctl_quiesce(struct dasd_block *block)
89dasd_ioctl_quiesce(struct dasd_device *device)
90{ 89{
91 unsigned long flags; 90 unsigned long flags;
91 struct dasd_device *base;
92 92
93 base = block->base;
93 if (!capable (CAP_SYS_ADMIN)) 94 if (!capable (CAP_SYS_ADMIN))
94 return -EACCES; 95 return -EACCES;
95 96
96 DEV_MESSAGE (KERN_DEBUG, device, "%s", 97 DEV_MESSAGE(KERN_DEBUG, base, "%s", "Quiesce IO on device");
97 "Quiesce IO on device"); 98 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
98 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 99 base->stopped |= DASD_STOPPED_QUIESCE;
99 device->stopped |= DASD_STOPPED_QUIESCE; 100 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
100 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
101 return 0; 101 return 0;
102} 102}
103 103
@@ -105,22 +105,21 @@ dasd_ioctl_quiesce(struct dasd_device *device)
105/* 105/*
106 * Quiesce device. 106 * Quiesce device.
107 */ 107 */
108static int 108static int dasd_ioctl_resume(struct dasd_block *block)
109dasd_ioctl_resume(struct dasd_device *device)
110{ 109{
111 unsigned long flags; 110 unsigned long flags;
111 struct dasd_device *base;
112 112
113 base = block->base;
113 if (!capable (CAP_SYS_ADMIN)) 114 if (!capable (CAP_SYS_ADMIN))
114 return -EACCES; 115 return -EACCES;
115 116
116 DEV_MESSAGE (KERN_DEBUG, device, "%s", 117 DEV_MESSAGE(KERN_DEBUG, base, "%s", "resume IO on device");
117 "resume IO on device"); 118 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
118 119 base->stopped &= ~DASD_STOPPED_QUIESCE;
119 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 120 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
120 device->stopped &= ~DASD_STOPPED_QUIESCE;
121 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
122 121
123 dasd_schedule_bh (device); 122 dasd_schedule_block_bh(block);
124 return 0; 123 return 0;
125} 124}
126 125
@@ -130,22 +129,23 @@ dasd_ioctl_resume(struct dasd_device *device)
130 * commands to format a single unit of the device. In terms of the ECKD 129 * commands to format a single unit of the device. In terms of the ECKD
131 * devices this means CCWs are generated to format a single track. 130 * devices this means CCWs are generated to format a single track.
132 */ 131 */
133static int 132static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
134dasd_format(struct dasd_device * device, struct format_data_t * fdata)
135{ 133{
136 struct dasd_ccw_req *cqr; 134 struct dasd_ccw_req *cqr;
135 struct dasd_device *base;
137 int rc; 136 int rc;
138 137
139 if (device->discipline->format_device == NULL) 138 base = block->base;
139 if (base->discipline->format_device == NULL)
140 return -EPERM; 140 return -EPERM;
141 141
142 if (device->state != DASD_STATE_BASIC) { 142 if (base->state != DASD_STATE_BASIC) {
143 DEV_MESSAGE(KERN_WARNING, device, "%s", 143 DEV_MESSAGE(KERN_WARNING, base, "%s",
144 "dasd_format: device is not disabled! "); 144 "dasd_format: device is not disabled! ");
145 return -EBUSY; 145 return -EBUSY;
146 } 146 }
147 147
148 DBF_DEV_EVENT(DBF_NOTICE, device, 148 DBF_DEV_EVENT(DBF_NOTICE, base,
149 "formatting units %d to %d (%d B blocks) flags %d", 149 "formatting units %d to %d (%d B blocks) flags %d",
150 fdata->start_unit, 150 fdata->start_unit,
151 fdata->stop_unit, fdata->blksize, fdata->intensity); 151 fdata->stop_unit, fdata->blksize, fdata->intensity);
@@ -156,20 +156,20 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
156 * enabling the device later. 156 * enabling the device later.
157 */ 157 */
158 if (fdata->start_unit == 0) { 158 if (fdata->start_unit == 0) {
159 struct block_device *bdev = bdget_disk(device->gdp, 0); 159 struct block_device *bdev = bdget_disk(block->gdp, 0);
160 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize); 160 bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
161 bdput(bdev); 161 bdput(bdev);
162 } 162 }
163 163
164 while (fdata->start_unit <= fdata->stop_unit) { 164 while (fdata->start_unit <= fdata->stop_unit) {
165 cqr = device->discipline->format_device(device, fdata); 165 cqr = base->discipline->format_device(base, fdata);
166 if (IS_ERR(cqr)) 166 if (IS_ERR(cqr))
167 return PTR_ERR(cqr); 167 return PTR_ERR(cqr);
168 rc = dasd_sleep_on_interruptible(cqr); 168 rc = dasd_sleep_on_interruptible(cqr);
169 dasd_sfree_request(cqr, cqr->device); 169 dasd_sfree_request(cqr, cqr->memdev);
170 if (rc) { 170 if (rc) {
171 if (rc != -ERESTARTSYS) 171 if (rc != -ERESTARTSYS)
172 DEV_MESSAGE(KERN_ERR, device, 172 DEV_MESSAGE(KERN_ERR, base,
173 " Formatting of unit %d failed " 173 " Formatting of unit %d failed "
174 "with rc = %d", 174 "with rc = %d",
175 fdata->start_unit, rc); 175 fdata->start_unit, rc);
@@ -186,7 +186,7 @@ dasd_format(struct dasd_device * device, struct format_data_t * fdata)
186static int 186static int
187dasd_ioctl_format(struct block_device *bdev, void __user *argp) 187dasd_ioctl_format(struct block_device *bdev, void __user *argp)
188{ 188{
189 struct dasd_device *device = bdev->bd_disk->private_data; 189 struct dasd_block *block = bdev->bd_disk->private_data;
190 struct format_data_t fdata; 190 struct format_data_t fdata;
191 191
192 if (!capable(CAP_SYS_ADMIN)) 192 if (!capable(CAP_SYS_ADMIN))
@@ -194,51 +194,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
194 if (!argp) 194 if (!argp)
195 return -EINVAL; 195 return -EINVAL;
196 196
197 if (device->features & DASD_FEATURE_READONLY) 197 if (block->base->features & DASD_FEATURE_READONLY)
198 return -EROFS; 198 return -EROFS;
199 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 199 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t)))
200 return -EFAULT; 200 return -EFAULT;
201 if (bdev != bdev->bd_contains) { 201 if (bdev != bdev->bd_contains) {
202 DEV_MESSAGE(KERN_WARNING, device, "%s", 202 DEV_MESSAGE(KERN_WARNING, block->base, "%s",
203 "Cannot low-level format a partition"); 203 "Cannot low-level format a partition");
204 return -EINVAL; 204 return -EINVAL;
205 } 205 }
206 return dasd_format(device, &fdata); 206 return dasd_format(block, &fdata);
207} 207}
208 208
209#ifdef CONFIG_DASD_PROFILE 209#ifdef CONFIG_DASD_PROFILE
210/* 210/*
211 * Reset device profile information 211 * Reset device profile information
212 */ 212 */
213static int 213static int dasd_ioctl_reset_profile(struct dasd_block *block)
214dasd_ioctl_reset_profile(struct dasd_device *device)
215{ 214{
216 memset(&device->profile, 0, sizeof (struct dasd_profile_info_t)); 215 memset(&block->profile, 0, sizeof(struct dasd_profile_info_t));
217 return 0; 216 return 0;
218} 217}
219 218
220/* 219/*
221 * Return device profile information 220 * Return device profile information
222 */ 221 */
223static int 222static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
224dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
225{ 223{
226 if (dasd_profile_level == DASD_PROFILE_OFF) 224 if (dasd_profile_level == DASD_PROFILE_OFF)
227 return -EIO; 225 return -EIO;
228 if (copy_to_user(argp, &device->profile, 226 if (copy_to_user(argp, &block->profile,
229 sizeof (struct dasd_profile_info_t))) 227 sizeof(struct dasd_profile_info_t)))
230 return -EFAULT; 228 return -EFAULT;
231 return 0; 229 return 0;
232} 230}
233#else 231#else
234static int 232static int dasd_ioctl_reset_profile(struct dasd_block *block)
235dasd_ioctl_reset_profile(struct dasd_device *device)
236{ 233{
237 return -ENOSYS; 234 return -ENOSYS;
238} 235}
239 236
240static int 237static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
241dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
242{ 238{
243 return -ENOSYS; 239 return -ENOSYS;
244} 240}
@@ -247,87 +243,88 @@ dasd_ioctl_read_profile(struct dasd_device *device, void __user *argp)
247/* 243/*
248 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2. 244 * Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
249 */ 245 */
250static int 246static int dasd_ioctl_information(struct dasd_block *block,
251dasd_ioctl_information(struct dasd_device *device, 247 unsigned int cmd, void __user *argp)
252 unsigned int cmd, void __user *argp)
253{ 248{
254 struct dasd_information2_t *dasd_info; 249 struct dasd_information2_t *dasd_info;
255 unsigned long flags; 250 unsigned long flags;
256 int rc; 251 int rc;
252 struct dasd_device *base;
257 struct ccw_device *cdev; 253 struct ccw_device *cdev;
258 struct ccw_dev_id dev_id; 254 struct ccw_dev_id dev_id;
259 255
260 if (!device->discipline->fill_info) 256 base = block->base;
257 if (!base->discipline->fill_info)
261 return -EINVAL; 258 return -EINVAL;
262 259
263 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL); 260 dasd_info = kzalloc(sizeof(struct dasd_information2_t), GFP_KERNEL);
264 if (dasd_info == NULL) 261 if (dasd_info == NULL)
265 return -ENOMEM; 262 return -ENOMEM;
266 263
267 rc = device->discipline->fill_info(device, dasd_info); 264 rc = base->discipline->fill_info(base, dasd_info);
268 if (rc) { 265 if (rc) {
269 kfree(dasd_info); 266 kfree(dasd_info);
270 return rc; 267 return rc;
271 } 268 }
272 269
273 cdev = device->cdev; 270 cdev = base->cdev;
274 ccw_device_get_id(cdev, &dev_id); 271 ccw_device_get_id(cdev, &dev_id);
275 272
276 dasd_info->devno = dev_id.devno; 273 dasd_info->devno = dev_id.devno;
277 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev); 274 dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev);
278 dasd_info->cu_type = cdev->id.cu_type; 275 dasd_info->cu_type = cdev->id.cu_type;
279 dasd_info->cu_model = cdev->id.cu_model; 276 dasd_info->cu_model = cdev->id.cu_model;
280 dasd_info->dev_type = cdev->id.dev_type; 277 dasd_info->dev_type = cdev->id.dev_type;
281 dasd_info->dev_model = cdev->id.dev_model; 278 dasd_info->dev_model = cdev->id.dev_model;
282 dasd_info->status = device->state; 279 dasd_info->status = base->state;
283 /* 280 /*
284 * The open_count is increased for every opener, that includes 281 * The open_count is increased for every opener, that includes
285 * the blkdev_get in dasd_scan_partitions. 282 * the blkdev_get in dasd_scan_partitions.
286 * This must be hidden from user-space. 283 * This must be hidden from user-space.
287 */ 284 */
288 dasd_info->open_count = atomic_read(&device->open_count); 285 dasd_info->open_count = atomic_read(&block->open_count);
289 if (!device->bdev) 286 if (!block->bdev)
290 dasd_info->open_count++; 287 dasd_info->open_count++;
291 288
292 /* 289 /*
293 * check if device is really formatted 290 * check if device is really formatted
294 * LDL / CDL was returned by 'fill_info' 291 * LDL / CDL was returned by 'fill_info'
295 */ 292 */
296 if ((device->state < DASD_STATE_READY) || 293 if ((base->state < DASD_STATE_READY) ||
297 (dasd_check_blocksize(device->bp_block))) 294 (dasd_check_blocksize(block->bp_block)))
298 dasd_info->format = DASD_FORMAT_NONE; 295 dasd_info->format = DASD_FORMAT_NONE;
299 296
300 dasd_info->features |= 297 dasd_info->features |=
301 ((device->features & DASD_FEATURE_READONLY) != 0); 298 ((base->features & DASD_FEATURE_READONLY) != 0);
302 299
303 if (device->discipline) 300 if (base->discipline)
304 memcpy(dasd_info->type, device->discipline->name, 4); 301 memcpy(dasd_info->type, base->discipline->name, 4);
305 else 302 else
306 memcpy(dasd_info->type, "none", 4); 303 memcpy(dasd_info->type, "none", 4);
307 304
308 if (device->request_queue->request_fn) { 305 if (block->request_queue->request_fn) {
309 struct list_head *l; 306 struct list_head *l;
310#ifdef DASD_EXTENDED_PROFILING 307#ifdef DASD_EXTENDED_PROFILING
311 { 308 {
312 struct list_head *l; 309 struct list_head *l;
313 spin_lock_irqsave(&device->lock, flags); 310 spin_lock_irqsave(&block->lock, flags);
314 list_for_each(l, &device->request_queue->queue_head) 311 list_for_each(l, &block->request_queue->queue_head)
315 dasd_info->req_queue_len++; 312 dasd_info->req_queue_len++;
316 spin_unlock_irqrestore(&device->lock, flags); 313 spin_unlock_irqrestore(&block->lock, flags);
317 } 314 }
318#endif /* DASD_EXTENDED_PROFILING */ 315#endif /* DASD_EXTENDED_PROFILING */
319 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 316 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
320 list_for_each(l, &device->ccw_queue) 317 list_for_each(l, &base->ccw_queue)
321 dasd_info->chanq_len++; 318 dasd_info->chanq_len++;
322 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), 319 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
323 flags); 320 flags);
324 } 321 }
325 322
326 rc = 0; 323 rc = 0;
327 if (copy_to_user(argp, dasd_info, 324 if (copy_to_user(argp, dasd_info,
328 ((cmd == (unsigned int) BIODASDINFO2) ? 325 ((cmd == (unsigned int) BIODASDINFO2) ?
329 sizeof (struct dasd_information2_t) : 326 sizeof(struct dasd_information2_t) :
330 sizeof (struct dasd_information_t)))) 327 sizeof(struct dasd_information_t))))
331 rc = -EFAULT; 328 rc = -EFAULT;
332 kfree(dasd_info); 329 kfree(dasd_info);
333 return rc; 330 return rc;
@@ -339,7 +336,7 @@ dasd_ioctl_information(struct dasd_device *device,
339static int 336static int
340dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) 337dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
341{ 338{
342 struct dasd_device *device = bdev->bd_disk->private_data; 339 struct dasd_block *block = bdev->bd_disk->private_data;
343 int intval; 340 int intval;
344 341
345 if (!capable(CAP_SYS_ADMIN)) 342 if (!capable(CAP_SYS_ADMIN))
@@ -351,11 +348,10 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
351 return -EFAULT; 348 return -EFAULT;
352 349
353 set_disk_ro(bdev->bd_disk, intval); 350 set_disk_ro(bdev->bd_disk, intval);
354 return dasd_set_feature(device->cdev, DASD_FEATURE_READONLY, intval); 351 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval);
355} 352}
356 353
357static int 354static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
358dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
359 unsigned long arg) 355 unsigned long arg)
360{ 356{
361 struct cmbdata __user *argp = (void __user *) arg; 357 struct cmbdata __user *argp = (void __user *) arg;
@@ -363,7 +359,7 @@ dasd_ioctl_readall_cmb(struct dasd_device *device, unsigned int cmd,
363 struct cmbdata data; 359 struct cmbdata data;
364 int ret; 360 int ret;
365 361
366 ret = cmf_readall(device->cdev, &data); 362 ret = cmf_readall(block->base->cdev, &data);
367 if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp)))) 363 if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
368 return -EFAULT; 364 return -EFAULT;
369 return ret; 365 return ret;
@@ -374,10 +370,10 @@ dasd_ioctl(struct inode *inode, struct file *file,
374 unsigned int cmd, unsigned long arg) 370 unsigned int cmd, unsigned long arg)
375{ 371{
376 struct block_device *bdev = inode->i_bdev; 372 struct block_device *bdev = inode->i_bdev;
377 struct dasd_device *device = bdev->bd_disk->private_data; 373 struct dasd_block *block = bdev->bd_disk->private_data;
378 void __user *argp = (void __user *)arg; 374 void __user *argp = (void __user *)arg;
379 375
380 if (!device) 376 if (!block)
381 return -ENODEV; 377 return -ENODEV;
382 378
383 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { 379 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
@@ -391,33 +387,33 @@ dasd_ioctl(struct inode *inode, struct file *file,
391 case BIODASDENABLE: 387 case BIODASDENABLE:
392 return dasd_ioctl_enable(bdev); 388 return dasd_ioctl_enable(bdev);
393 case BIODASDQUIESCE: 389 case BIODASDQUIESCE:
394 return dasd_ioctl_quiesce(device); 390 return dasd_ioctl_quiesce(block);
395 case BIODASDRESUME: 391 case BIODASDRESUME:
396 return dasd_ioctl_resume(device); 392 return dasd_ioctl_resume(block);
397 case BIODASDFMT: 393 case BIODASDFMT:
398 return dasd_ioctl_format(bdev, argp); 394 return dasd_ioctl_format(bdev, argp);
399 case BIODASDINFO: 395 case BIODASDINFO:
400 return dasd_ioctl_information(device, cmd, argp); 396 return dasd_ioctl_information(block, cmd, argp);
401 case BIODASDINFO2: 397 case BIODASDINFO2:
402 return dasd_ioctl_information(device, cmd, argp); 398 return dasd_ioctl_information(block, cmd, argp);
403 case BIODASDPRRD: 399 case BIODASDPRRD:
404 return dasd_ioctl_read_profile(device, argp); 400 return dasd_ioctl_read_profile(block, argp);
405 case BIODASDPRRST: 401 case BIODASDPRRST:
406 return dasd_ioctl_reset_profile(device); 402 return dasd_ioctl_reset_profile(block);
407 case BLKROSET: 403 case BLKROSET:
408 return dasd_ioctl_set_ro(bdev, argp); 404 return dasd_ioctl_set_ro(bdev, argp);
409 case DASDAPIVER: 405 case DASDAPIVER:
410 return dasd_ioctl_api_version(argp); 406 return dasd_ioctl_api_version(argp);
411 case BIODASDCMFENABLE: 407 case BIODASDCMFENABLE:
412 return enable_cmf(device->cdev); 408 return enable_cmf(block->base->cdev);
413 case BIODASDCMFDISABLE: 409 case BIODASDCMFDISABLE:
414 return disable_cmf(device->cdev); 410 return disable_cmf(block->base->cdev);
415 case BIODASDREADALLCMB: 411 case BIODASDREADALLCMB:
416 return dasd_ioctl_readall_cmb(device, cmd, arg); 412 return dasd_ioctl_readall_cmb(block, cmd, arg);
417 default: 413 default:
418 /* if the discipline has an ioctl method try it. */ 414 /* if the discipline has an ioctl method try it. */
419 if (device->discipline->ioctl) { 415 if (block->base->discipline->ioctl) {
420 int rval = device->discipline->ioctl(device, cmd, argp); 416 int rval = block->base->discipline->ioctl(block, cmd, argp);
421 if (rval != -ENOIOCTLCMD) 417 if (rval != -ENOIOCTLCMD)
422 return rval; 418 return rval;
423 } 419 }
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index ac7e8ef504cb..0584a7dc5ede 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -54,11 +54,16 @@ static int
54dasd_devices_show(struct seq_file *m, void *v) 54dasd_devices_show(struct seq_file *m, void *v)
55{ 55{
56 struct dasd_device *device; 56 struct dasd_device *device;
57 struct dasd_block *block;
57 char *substr; 58 char *substr;
58 59
59 device = dasd_device_from_devindex((unsigned long) v - 1); 60 device = dasd_device_from_devindex((unsigned long) v - 1);
60 if (IS_ERR(device)) 61 if (IS_ERR(device))
61 return 0; 62 return 0;
63 if (device->block)
64 block = device->block;
65 else
66 return 0;
62 /* Print device number. */ 67 /* Print device number. */
63 seq_printf(m, "%s", device->cdev->dev.bus_id); 68 seq_printf(m, "%s", device->cdev->dev.bus_id);
64 /* Print discipline string. */ 69 /* Print discipline string. */
@@ -67,14 +72,14 @@ dasd_devices_show(struct seq_file *m, void *v)
67 else 72 else
68 seq_printf(m, "(none)"); 73 seq_printf(m, "(none)");
69 /* Print kdev. */ 74 /* Print kdev. */
70 if (device->gdp) 75 if (block->gdp)
71 seq_printf(m, " at (%3d:%6d)", 76 seq_printf(m, " at (%3d:%6d)",
72 device->gdp->major, device->gdp->first_minor); 77 block->gdp->major, block->gdp->first_minor);
73 else 78 else
74 seq_printf(m, " at (???:??????)"); 79 seq_printf(m, " at (???:??????)");
75 /* Print device name. */ 80 /* Print device name. */
76 if (device->gdp) 81 if (block->gdp)
77 seq_printf(m, " is %-8s", device->gdp->disk_name); 82 seq_printf(m, " is %-8s", block->gdp->disk_name);
78 else 83 else
79 seq_printf(m, " is ????????"); 84 seq_printf(m, " is ????????");
80 /* Print devices features. */ 85 /* Print devices features. */
@@ -100,14 +105,14 @@ dasd_devices_show(struct seq_file *m, void *v)
100 case DASD_STATE_READY: 105 case DASD_STATE_READY:
101 case DASD_STATE_ONLINE: 106 case DASD_STATE_ONLINE:
102 seq_printf(m, "active "); 107 seq_printf(m, "active ");
103 if (dasd_check_blocksize(device->bp_block)) 108 if (dasd_check_blocksize(block->bp_block))
104 seq_printf(m, "n/f "); 109 seq_printf(m, "n/f ");
105 else 110 else
106 seq_printf(m, 111 seq_printf(m,
107 "at blocksize: %d, %ld blocks, %ld MB", 112 "at blocksize: %d, %ld blocks, %ld MB",
108 device->bp_block, device->blocks, 113 block->bp_block, block->blocks,
109 ((device->bp_block >> 9) * 114 ((block->bp_block >> 9) *
110 device->blocks) >> 11); 115 block->blocks) >> 11);
111 break; 116 break;
112 default: 117 default:
113 seq_printf(m, "no stat"); 118 seq_printf(m, "no stat");