author     Horst Hummel <horst.hummel@de.ibm.com>        2006-08-30 08:33:33 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2006-08-30 08:33:33 -0400
commit     8f61701bdf536c7a80f0f614bac91c7883804c4c
tree       59cacbdf8dc8e5677556f5c95c1b11a775ff47de /drivers/s390
parent     af313e5a4f56b248767d667c3c2436dda767df77
[S390] dasd: fix device shutdown process.
Fix clear_IO handling (need to wait for the clear interrupt) and
introduce error handling in shutdown processing.
Signed-off-by: Horst Hummel <horst.hummel@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c        | 192
-rw-r--r--  drivers/s390/block/dasd_genhd.c  |  10
2 files changed, 137 insertions(+), 65 deletions(-)
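The core of the fix shows up in the dasd_flush_ccw_queue() hunk below: a request that is still in I/O is terminated, marked DASD_CQR_CLEAR, and the flush path then sleeps on the new dasd_flush_wq wait queue until the interrupt handler reports the completed clear function (DASD_CQR_QUEUED) and wakes the waiter; only then is the request marked FAILED and its callback run. As a rough userspace illustration of that handshake only, here is a minimal pthreads sketch; the struct, enum, and variable names are invented for the example and are not the kernel API, and the condition variable merely plays the role of dasd_flush_wq.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the DASD_CQR_* request states used in the patch. */
enum cqr_status { CQR_IN_IO, CQR_CLEAR, CQR_QUEUED, CQR_FAILED };

struct cqr {
	enum cqr_status status;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t flush_wq = PTHREAD_COND_INITIALIZER; /* role of dasd_flush_wq */

/* Interrupt-handler stand-in: the clear completes asynchronously. */
static void *clear_interrupt(void *arg)
{
	struct cqr *cqr = arg;

	pthread_mutex_lock(&lock);
	while (cqr->status != CQR_CLEAR)	/* interrupt only arrives after the clear was issued */
		pthread_cond_wait(&flush_wq, &lock);
	cqr->status = CQR_QUEUED;		/* clear function completed */
	pthread_cond_broadcast(&flush_wq);	/* wake_up(&dasd_flush_wq) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Flush path: terminate the request, then wait for the clear interrupt. */
static void flush_request(struct cqr *cqr)
{
	pthread_mutex_lock(&lock);
	cqr->status = CQR_CLEAR;		/* term_IO() succeeded, clear is in flight */
	pthread_cond_broadcast(&flush_wq);
	while (cqr->status != CQR_QUEUED)	/* wait_event(dasd_flush_wq, _wait_for_clear(cqr)) */
		pthread_cond_wait(&flush_wq, &lock);
	cqr->status = CQR_FAILED;		/* only now finish the flushed request */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct cqr cqr = { .status = CQR_IN_IO };
	pthread_t irq;

	pthread_create(&irq, NULL, clear_interrupt, &cqr);
	flush_request(&cqr);
	pthread_join(irq, NULL);
	printf("request flushed, status=%d (FAILED)\n", cqr.status);
	return 0;
}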
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d8e9b95f0a1a..25c1ef6dfd44 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -52,7 +52,7 @@ static void dasd_setup_queue(struct dasd_device * device);
 static void dasd_free_queue(struct dasd_device * device);
 static void dasd_flush_request_queue(struct dasd_device *);
 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
-static void dasd_flush_ccw_queue(struct dasd_device *, int);
+static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
 static void do_kick_device(void *data);
 
@@ -60,6 +60,7 @@ static void do_kick_device(void *data);
  * SECTION: Operations on the device structure.
  */
 static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
 
 /*
  * Allocate memory for a new device structure.
@@ -121,7 +122,7 @@ dasd_free_device(struct dasd_device *device)
 /*
  * Make a new device known to the system.
  */
-static inline int
+static int
 dasd_state_new_to_known(struct dasd_device *device)
 {
 	int rc;
@@ -145,7 +146,7 @@ dasd_state_new_to_known(struct dasd_device *device)
 /*
  * Let the system forget about a device.
  */
-static inline void
+static int
 dasd_state_known_to_new(struct dasd_device * device)
 {
 	/* Disable extended error reporting for this device. */
@@ -163,12 +164,13 @@ dasd_state_known_to_new(struct dasd_device * device)
 
 	/* Give up reference we took in dasd_state_new_to_known. */
 	dasd_put_device(device);
+	return 0;
 }
 
 /*
  * Request the irq line for the device.
  */
-static inline int
+static int
 dasd_state_known_to_basic(struct dasd_device * device)
 {
 	int rc;
@@ -192,17 +194,23 @@ dasd_state_known_to_basic(struct dasd_device * device)
 /*
  * Release the irq line for the device. Terminate any running i/o.
  */
-static inline void
+static int
 dasd_state_basic_to_known(struct dasd_device * device)
 {
+	int rc;
+
 	dasd_gendisk_free(device);
-	dasd_flush_ccw_queue(device, 1);
+	rc = dasd_flush_ccw_queue(device, 1);
+	if (rc)
+		return rc;
+
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
 		debug_unregister(device->debug_area);
 		device->debug_area = NULL;
 	}
 	device->state = DASD_STATE_KNOWN;
+	return 0;
 }
 
 /*
@@ -219,7 +227,7 @@ dasd_state_basic_to_known(struct dasd_device * device)
  * In case the analysis returns an error, the device setup is stopped
  * (a fake disk was already added to allow formatting).
  */
-static inline int
+static int
 dasd_state_basic_to_ready(struct dasd_device * device)
 {
 	int rc;
@@ -247,25 +255,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
  * Forget format information. Check if the target level is basic
  * and if it is create fake disk for formatting.
  */
-static inline void
+static int
 dasd_state_ready_to_basic(struct dasd_device * device)
 {
-	dasd_flush_ccw_queue(device, 0);
+	int rc;
+
+	rc = dasd_flush_ccw_queue(device, 0);
+	if (rc)
+		return rc;
 	dasd_destroy_partitions(device);
 	dasd_flush_request_queue(device);
 	device->blocks = 0;
 	device->bp_block = 0;
 	device->s2b_shift = 0;
 	device->state = DASD_STATE_BASIC;
+	return 0;
 }
 
 /*
  * Back to basic.
  */
-static inline void
+static int
 dasd_state_unfmt_to_basic(struct dasd_device * device)
 {
 	device->state = DASD_STATE_BASIC;
+	return 0;
 }
 
 /*
@@ -273,7 +287,7 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
  * the requeueing of requests from the linux request queue to the
  * ccw queue.
  */
-static inline int
+static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
 	device->state = DASD_STATE_ONLINE;
@@ -284,16 +298,17 @@ dasd_state_ready_to_online(struct dasd_device * device)
 /*
  * Stop the requeueing of requests again.
  */
-static inline void
+static int
 dasd_state_online_to_ready(struct dasd_device * device)
 {
 	device->state = DASD_STATE_READY;
+	return 0;
 }
 
 /*
  * Device startup state changes.
  */
-static inline int
+static int
 dasd_increase_state(struct dasd_device *device)
 {
 	int rc;
@@ -329,30 +344,37 @@ dasd_increase_state(struct dasd_device *device)
 /*
  * Device shutdown state changes.
  */
-static inline int
+static int
 dasd_decrease_state(struct dasd_device *device)
 {
+	int rc;
+
+	rc = 0;
 	if (device->state == DASD_STATE_ONLINE &&
 	    device->target <= DASD_STATE_READY)
-		dasd_state_online_to_ready(device);
+		rc = dasd_state_online_to_ready(device);
 
-	if (device->state == DASD_STATE_READY &&
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
 	    device->target <= DASD_STATE_BASIC)
-		dasd_state_ready_to_basic(device);
+		rc = dasd_state_ready_to_basic(device);
 
-	if (device->state == DASD_STATE_UNFMT &&
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
 	    device->target <= DASD_STATE_BASIC)
-		dasd_state_unfmt_to_basic(device);
+		rc = dasd_state_unfmt_to_basic(device);
 
-	if (device->state == DASD_STATE_BASIC &&
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
 	    device->target <= DASD_STATE_KNOWN)
-		dasd_state_basic_to_known(device);
+		rc = dasd_state_basic_to_known(device);
 
-	if (device->state == DASD_STATE_KNOWN &&
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
 	    device->target <= DASD_STATE_NEW)
-		dasd_state_known_to_new(device);
+		rc = dasd_state_known_to_new(device);
 
-	return 0;
+	return rc;
 }
 
 /*
@@ -701,6 +723,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 			cqr->retries--;
 			cqr->status = DASD_CQR_CLEAR;
 			cqr->stopclk = get_clock();
+			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "terminate cqr %p successful",
 				      cqr);
@@ -978,6 +1001,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_QUEUED;
 		dasd_clear_timer(device);
+		wake_up(&dasd_flush_wq);
 		dasd_schedule_bh(device);
 		return;
 	}
@@ -1241,6 +1265,10 @@ __dasd_check_expire(struct dasd_device * device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
 	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
 		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+			DEV_MESSAGE(KERN_ERR, device,
+				    "internal error - timeout (%is) expired "
+				    "for cqr %p (%i retries left)",
+				    (cqr->expires/HZ), cqr, cqr->retries);
 			if (device->discipline->term_IO(cqr) != 0)
 				/* Hmpf, try again in 1/10 sec */
 				dasd_set_timer(device, 10);
@@ -1285,46 +1313,100 @@ __dasd_start_head(struct dasd_device * device)
 		dasd_set_timer(device, 50);
 }
 
+static inline int
+_wait_for_clear(struct dasd_ccw_req *cqr)
+{
+	return (cqr->status == DASD_CQR_QUEUED);
+}
+
 /*
- * Remove requests from the ccw queue.
+ * Remove all requests from the ccw queue (all = '1') or only block device
+ * requests in case all = '0'.
+ * Take care of the erp-chain (chained via cqr->refers) and remove either
+ * the whole erp-chain or none of the erp-requests.
+ * If a request is currently running, term_IO is called and the request
+ * is re-queued. Prior to removing the terminated request we need to wait
+ * for the clear-interrupt.
+ * In case termination is not possible we stop processing and just finish
+ * the already moved requests.
  */
-static void
+static int
 dasd_flush_ccw_queue(struct dasd_device * device, int all)
 {
+	struct dasd_ccw_req *cqr, *orig, *n;
+	int rc, i;
+
 	struct list_head flush_queue;
-	struct list_head *l, *n;
-	struct dasd_ccw_req *cqr;
 
 	INIT_LIST_HEAD(&flush_queue);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
+		/* get original request of erp request-chain */
+		for (orig = cqr; orig->refers != NULL; orig = orig->refers);
+
 		/* Flush all request or only block device requests? */
-		if (all == 0 && cqr->callback == dasd_end_request_cb)
+		if (all == 0 && cqr->callback != dasd_end_request_cb &&
+		    orig->callback != dasd_end_request_cb) {
 			continue;
-		if (cqr->status == DASD_CQR_IN_IO)
-			device->discipline->term_IO(cqr);
-		if (cqr->status != DASD_CQR_DONE ||
-		    cqr->status != DASD_CQR_FAILED) {
-			cqr->status = DASD_CQR_FAILED;
+		}
+		/* Check status and move request to flush_queue */
+		switch (cqr->status) {
+		case DASD_CQR_IN_IO:
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate request */
+				DEV_MESSAGE(KERN_ERR, device,
+					    "dasd flush ccw_queue is unable "
+					    " to terminate request %p",
+					    cqr);
+				/* stop flush processing */
+				goto finished;
+			}
+			break;
+		case DASD_CQR_QUEUED:
+		case DASD_CQR_ERROR:
+			/* set request to FAILED */
 			cqr->stopclk = get_clock();
+			cqr->status = DASD_CQR_FAILED;
+			break;
+		default: /* do not touch the others */
+			break;
+		}
+		/* Rechain request (including erp chain) */
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
+			cqr->endclk = get_clock();
+			list_move_tail(&cqr->list, &flush_queue);
+		}
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+
+finished:
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
+		if (cqr->status == DASD_CQR_CLEAR) {
+			/* wait for clear interrupt! */
+			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
+			cqr->status = DASD_CQR_FAILED;
 		}
 		/* Process finished ERP request. */
 		if (cqr->refers) {
 			__dasd_process_erp(device, cqr);
-			continue;
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements */
+			goto restart_cb;
 		}
-		/* Rechain request on device request queue */
+		/* call the callback function */
 		cqr->endclk = get_clock();
-		list_move_tail(&cqr->list, &flush_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	/* Now call the callback function of flushed requests */
-	list_for_each_safe(l, n, &flush_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
 		if (cqr->callback != NULL)
 			(cqr->callback)(cqr, cqr->callback_data);
 	}
+	return rc;
 }
 
 /*
@@ -1510,10 +1592,8 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
 		if (device->discipline->term_IO) {
 			cqr->retries = -1;
 			device->discipline->term_IO(cqr);
-			/*nished =
-			 * wait (non-interruptible) for final status
-			 * because signal ist still pending
-			 */
+			/* wait (non-interruptible) for final status
+			 * because signal is still pending */
 			spin_unlock_irq(get_ccwdev_lock(device->cdev));
 			wait_event(wait_q, _wait_for_wakeup(cqr));
 			spin_lock_irq(get_ccwdev_lock(device->cdev));
@@ -1546,19 +1626,11 @@ static inline int
 _dasd_term_running_cqr(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
-	int rc;
 
 	if (list_empty(&device->ccw_queue))
 		return 0;
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
-	rc = device->discipline->term_IO(cqr);
-	if (rc == 0) {
-		/* termination successful */
-		cqr->status = DASD_CQR_QUEUED;
-		cqr->startclk = cqr->stopclk = 0;
-		cqr->starttime = 0;
-	}
-	return rc;
+	return device->discipline->term_IO(cqr);
 }
 
 int
@@ -1726,10 +1798,7 @@ dasd_flush_request_queue(struct dasd_device * device)
 		return;
 
 	spin_lock_irq(&device->request_queue_lock);
-	while (!list_empty(&device->request_queue->queue_head)) {
-		req = elv_next_request(device->request_queue);
-		if (req == NULL)
-			break;
+	while ((req = elv_next_request(device->request_queue))) {
 		blkdev_dequeue_request(req);
 		dasd_end_request(req, 0);
 	}
@@ -2091,6 +2160,7 @@ dasd_init(void)
 	int rc;
 
 	init_waitqueue_head(&dasd_init_waitq);
+	init_waitqueue_head(&dasd_flush_wq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 4c272b70f41a..d163632101d2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -83,10 +83,12 @@ dasd_gendisk_alloc(struct dasd_device *device)
 void
 dasd_gendisk_free(struct dasd_device *device)
 {
-	del_gendisk(device->gdp);
-	device->gdp->queue = NULL;
-	put_disk(device->gdp);
-	device->gdp = NULL;
+	if (device->gdp) {
+		del_gendisk(device->gdp);
+		device->gdp->queue = NULL;
+		put_disk(device->gdp);
+		device->gdp = NULL;
+	}
 }
 
 /*