Diffstat (limited to 'block/blk-core.c')
 block/blk-core.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index ee0e5cafa859..1a95272cca50 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
  * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
         spinlock_t *lock = q->queue_lock;
 
-        /* mark @q DEAD, no new request or merges will be allowed afterwards */
+        /* mark @q DYING, no new request or merges will be allowed afterwards */
         mutex_lock(&q->sysfs_lock);
-        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+        queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
         spin_lock_irq(lock);
 
         /*
-         * Dead queue is permanently in bypass mode till released. Note
+         * A dying queue is permanently in bypass mode till released. Note
          * that, unlike blk_queue_bypass_start(), we aren't performing
          * synchronize_rcu() after entering bypass mode to avoid the delay
          * as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
 
         queue_flag_set(QUEUE_FLAG_NOMERGES, q);
         queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-        queue_flag_set(QUEUE_FLAG_DEAD, q);
+        queue_flag_set(QUEUE_FLAG_DYING, q);
         spin_unlock_irq(lock);
         mutex_unlock(&q->sysfs_lock);
 
-        /* drain all requests queued before DEAD marking */
+        /* drain all requests queued before DYING marking */
         blk_drain_queue(q, true);
 
         /* @q won't process any more request, flush async actions */
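
The blk_queue_dying() test used in the hunks below comes from the companion header change in include/linux/blkdev.h, which is outside this file's diff. A minimal sketch only, assuming the helper mirrors the old blk_queue_dead() bit test on queue_flags:

    /*
     * Sketch only, not part of this blk-core.c diff: the dying-state test is
     * assumed to be the conventional queue_flags bit check.
     */
    #define blk_queue_dying(q)  test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
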
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-        if (likely(!blk_queue_dead(q))) {
+        if (likely(!blk_queue_dying(q))) {
                 __blk_get_queue(q);
                 return true;
         }
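
For context, a caller-side sketch (illustrative, not from this commit, with a hypothetical helper name) of how blk_get_queue() and blk_put_queue() pair up once the check is based on the dying state:

    #include <linux/blkdev.h>

    /* Illustrative pattern: pin @q only while it is not yet dying, and drop
     * the reference when finished. */
    static int example_pin_queue(struct request_queue *q)
    {
            if (!blk_get_queue(q))          /* fails once QUEUE_FLAG_DYING is set */
                    return -ENODEV;

            /* ... @q may be used here, a reference is held ... */

            blk_put_queue(q);
            return 0;
    }
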
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
         const bool is_sync = rw_is_sync(rw_flags) != 0;
         int may_queue;
 
-        if (unlikely(blk_queue_dead(q)))
+        if (unlikely(blk_queue_dying(q)))
                 return NULL;
 
         may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ retry:
         if (rq)
                 return rq;
 
-        if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+        if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
                 blk_put_rl(rl);
                 return NULL;
         }
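
The two allocation hunks above surface to callers as a NULL return. A hedged sketch of the caller side, assuming the 3.x-era blk_get_request(q, rw, gfp_mask) signature and a hypothetical helper name:

    #include <linux/blkdev.h>

    /* Illustrative only: on a dying queue blk_get_request() returns NULL even
     * for __GFP_WAIT allocations, because __get_request()/get_request() bail. */
    static int example_alloc_request(struct request_queue *q)
    {
            struct request *rq;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENODEV;         /* allocation failed or queue is dying */

            blk_put_request(rq);            /* a real caller would issue it instead */
            return 0;
    }
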
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                 return -EIO;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        if (unlikely(blk_queue_dead(q))) {
+        if (unlikely(blk_queue_dying(q))) {
                 spin_unlock_irqrestore(q->queue_lock, flags);
                 return -ENODEV;
         }
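
For a stacking driver that inserts cloned requests into a lower queue, the hunk above means a dying lower device is reported as -ENODEV. A hypothetical caller sketch, not taken from this commit:

    #include <linux/blkdev.h>
    #include <linux/printk.h>

    /* Sketch only: propagate the -ENODEV that a dying lower queue now returns. */
    static int example_dispatch_clone(struct request_queue *lower_q,
                                      struct request *clone)
    {
            int error;

            error = blk_insert_cloned_request(lower_q, clone);
            if (error == -ENODEV)
                    pr_debug("lower queue is dying, failing cloned request\n");
            return error;                   /* 0, -EIO, or -ENODEV */
    }
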
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
         trace_block_unplug(q, depth, !from_schedule);
 
         /*
-         * Don't mess with dead queue.
+         * Don't mess with a dying queue.
          */
-        if (unlikely(blk_queue_dead(q))) {
+        if (unlikely(blk_queue_dying(q))) {
                 spin_unlock(q->queue_lock);
                 return;
         }
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 /*
                  * Short-circuit if @q is dead
                  */
-                if (unlikely(blk_queue_dead(q))) {
+                if (unlikely(blk_queue_dying(q))) {
                         __blk_end_request_all(rq, -ENODEV);
                         continue;
                 }
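
The last two hunks sit on the plugging path. As context, a sketch (assumptions: 3.x-era submit_bio(rw, bio) signature, hypothetical helper name) of how work reaches blk_flush_plug_list(), where a dying queue now completes requests with -ENODEV:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Illustrative only: bios submitted under a plug are flushed through
     * blk_flush_plug_list(), which fails them with -ENODEV if @q is dying. */
    static void example_plugged_submit(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);
            for (i = 0; i < nr; i++)
                    submit_bio(READ, bios[i]);      /* 3.x-era submit_bio(rw, bio) */
            blk_finish_plug(&plug);                 /* triggers blk_flush_plug_list() */
    }
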