Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	57
1 files changed, 36 insertions, 21 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 034cbb2024f0..7e1523521c70 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -349,11 +349,13 @@ EXPORT_SYMBOL(blk_put_queue);
 /**
  * blk_drain_queue - drain requests from request_queue
  * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
  *
- * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring
- * that no new requests which need to be drained are queued.
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
  */
-void blk_drain_queue(struct request_queue *q)
+void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
 	while (true) {
 		int nr_rqs;
@@ -361,9 +363,15 @@ void blk_drain_queue(struct request_queue *q)
 		spin_lock_irq(q->queue_lock);
 
 		elv_drain_elevator(q);
+		if (drain_all)
+			blk_throtl_drain(q);
 
 		__blk_run_queue(q);
-		nr_rqs = q->rq.elvpriv;
+
+		if (drain_all)
+			nr_rqs = q->rq.count[0] + q->rq.count[1];
+		else
+			nr_rqs = q->rq.elvpriv;
 
 		spin_unlock_irq(q->queue_lock);
 
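With @drain_all set, blk_drain_queue() now also drains the throttling layer via blk_throtl_drain() and waits on the total allocated request count across both request pools (q->rq.count[0] + q->rq.count[1]) instead of only the ELVPRIV count; the rest of the loop (not shown in the hunk) breaks once the count reaches zero. As a rough userspace analogue of this poll-under-lock drain pattern (fake_queue, fake_drain_queue and the 10ms backoff are illustrative assumptions, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct fake_queue {
	pthread_mutex_t lock;
	int nr_elvpriv;		/* requests w/ elevator-private data */
	int nr_total;		/* all allocated requests */
};

/* Poll the relevant counter under the lock and sleep between
 * iterations until it reaches zero, mirroring the loop in
 * blk_drain_queue(). */
static void fake_drain_queue(struct fake_queue *q, bool drain_all)
{
	while (true) {
		int nr_rqs;

		pthread_mutex_lock(&q->lock);
		/* kick processing here, as __blk_run_queue() does */
		nr_rqs = drain_all ? q->nr_total : q->nr_elvpriv;
		pthread_mutex_unlock(&q->lock);

		if (!nr_rqs)
			break;
		usleep(10 * 1000);	/* assumed ~10ms backoff */
	}
}

Polling with a short sleep keeps the drain simple: the counters are sampled under a lock the drainer cannot hold while waiting, so no wait-queue coupling with the completion path is needed.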
@@ -373,30 +381,40 @@ void blk_drain_queue(struct request_queue *q)
 	}
 }
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	/*
-	 * We know we have process context here, so we can be a little
-	 * cautious and ensure that pending block actions on this device
-	 * are done before moving on. Going into this function, we should
-	 * not have processes doing IO to this device.
-	 */
-	blk_sync_queue(q);
+	spinlock_t *lock = q->queue_lock;
 
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-	mutex_unlock(&q->sysfs_lock);
+
+	spin_lock_irq(lock);
+	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	if (q->queue_lock != &q->__queue_lock)
 		q->queue_lock = &q->__queue_lock;
 
+	spin_unlock_irq(lock);
+	mutex_unlock(&q->sysfs_lock);
+
+	/* drain all requests queued before DEAD marking */
+	blk_drain_queue(q, true);
+
+	/* @q won't process any more request, flush async actions */
+	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	blk_sync_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
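Note the inversion relative to the old code: instead of syncing first and marking DEAD last, the queue is now marked dead (plus NOMERGES/NOXMERGES) under both the sysfs mutex and the queue lock before anything is drained or flushed. A minimal self-contained userspace sketch of this mark-then-drain ordering (fake_queue and fake_cleanup_queue are illustrative assumptions, not the kernel implementation):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct fake_queue {
	pthread_mutex_t lock;
	bool dead;		/* QUEUE_FLAG_DEAD analogue */
	int nr_total;		/* all outstanding requests */
};

/* Same ordering as the rewritten blk_cleanup_queue(): 1) mark dead
 * under the lock so no new work can enter, 2) drain what already got
 * in, 3) only then tear down. Draining before marking dead would
 * leave a window for new requests to arrive behind the drain. */
static void fake_cleanup_queue(struct fake_queue *q)
{
	int nr_rqs;

	pthread_mutex_lock(&q->lock);
	q->dead = true;
	pthread_mutex_unlock(&q->lock);

	/* drain everything queued before the dead marking */
	do {
		pthread_mutex_lock(&q->lock);
		nr_rqs = q->nr_total;
		pthread_mutex_unlock(&q->lock);
		if (nr_rqs)
			usleep(10 * 1000);
	} while (nr_rqs);

	/* queue is and will stay empty: safe to free resources here */
}

In the kernel version the lock pointer is cached in a local (spinlock_t *lock) because the DEAD section redirects q->queue_lock back to &q->__queue_lock while a driver-supplied lock may still be held; the unlock must use the pointer that was actually locked.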
@@ -1509,9 +1527,6 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		goto end_io;
-
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
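The deleted test was an unlocked peek at QUEUE_FLAG_DEAD at bio-submission time, which could race with teardown: the flag could be set immediately after the test passed. Now that blk_cleanup_queue() sets DEAD under the queue lock and then drains, dead-queue rejection can be performed race-free deeper in the submission path (exactly where the kernel re-checks is outside the quoted hunks). A hedged sketch of the difference, with illustrative names:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct fake_queue {
	pthread_mutex_t lock;
	bool dead;
};

/* Racy: the queue may be marked dead between this unlocked test and
 * the moment the request is actually queued (the pattern removed above). */
static int submit_unlocked_check(struct fake_queue *q)
{
	if (q->dead)
		return -ENODEV;
	/* <-- cleanup can mark the queue dead right here */
	return 0;	/* enqueue anyway: request slips past DEAD */
}

/* Race-free: test and enqueue under the same lock that cleanup holds
 * while setting the flag, so nothing slips past the DEAD marking. */
static int submit_locked_check(struct fake_queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (q->dead)
		ret = -ENODEV;
	/* else: enqueue while still holding the lock */
	pthread_mutex_unlock(&q->lock);
	return ret;
}

Only the locked variant makes the new kernel-doc promise, failing all future requests with -ENODEV, actually enforceable.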