-rw-r--r--  block/blk-cgroup.c       |  2
-rw-r--r--  block/blk-core.c         | 26
-rw-r--r--  block/blk-exec.c         |  2
-rw-r--r--  block/blk-sysfs.c        |  4
-rw-r--r--  block/blk-throttle.c     |  2
-rw-r--r--  block/blk.h              |  2
-rw-r--r--  drivers/scsi/scsi_lib.c  |  2
-rw-r--r--  include/linux/blkdev.h   |  4
8 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d0b770391ad4..5dea4e8dbc55 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 * we shouldn't allow anything to go through for a bypassing queue.
 	 */
 	if (unlikely(blk_queue_bypass(q)))
-		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
 	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
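
For reference, a hedged sketch of how a caller might act on the two error codes above; my_get_blkg() is illustrative and not part of this patch, and it assumes the usual blkg_lookup_create() calling context (rcu_read_lock() plus q->queue_lock held):

/* Illustrative caller only; my_get_blkg() is not part of this patch. */
static struct blkcg_gq *my_get_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg = blkg_lookup_create(blkcg, q);

	if (IS_ERR(blkg)) {
		if (PTR_ERR(blkg) == -EINVAL)
			return NULL;	/* queue is dying, give up for good */
		return NULL;		/* -EBUSY: queue only bypassing, may retry */
	}
	return blkg;
}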
diff --git a/block/blk-core.c b/block/blk-core.c
index ee0e5cafa859..1a95272cca50 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
  * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
 	spinlock_t *lock = q->queue_lock;
 
-	/* mark @q DEAD, no new request or merges will be allowed afterwards */
+	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
 	spin_lock_irq(lock);
 
 	/*
-	 * Dead queue is permanently in bypass mode till released. Note
+	 * A dying queue is permanently in bypass mode till released. Note
 	 * that, unlike blk_queue_bypass_start(), we aren't performing
 	 * synchronize_rcu() after entering bypass mode to avoid the delay
 	 * as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-	queue_flag_set(QUEUE_FLAG_DEAD, q);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
 	spin_unlock_irq(lock);
 	mutex_unlock(&q->sysfs_lock);
 
-	/* drain all requests queued before DEAD marking */
+	/* drain all requests queued before DYING marking */
 	blk_drain_queue(q, true);
 
 	/* @q won't process any more request, flush async actions */
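
As a reminder of how the sequence above is driven, a minimal sketch of a driver teardown path; struct mydev and mydrv_remove() are made-up names, only the call ordering matters:

/* Hypothetical driver removal path; names are illustrative. */
static void mydrv_remove(struct mydev *dev)
{
	del_gendisk(dev->disk);		/* stop accepting new I/O from the disk */
	blk_cleanup_queue(dev->queue);	/* mark DYING, drain, drop the queue ref */
	put_disk(dev->disk);
}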
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
 
 bool blk_get_queue(struct request_queue *q)
 {
-	if (likely(!blk_queue_dead(q))) {
+	if (likely(!blk_queue_dying(q))) {
 		__blk_get_queue(q);
 		return true;
 	}
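
A hedged sketch of the caller-side contract of blk_get_queue() after this change; the wrapper name is illustrative:

/* Illustrative only: blk_get_queue() refuses new references once the
 * queue has been marked DYING by blk_cleanup_queue(). */
static struct request_queue *my_grab_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))
		return NULL;	/* teardown already started, do not use @q */
	return q;		/* caller must blk_put_queue() when done */
}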
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(blk_queue_dead(q)))
+	if (unlikely(blk_queue_dying(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ retry:
 	if (rq)
 		return rq;
 
-	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
 		return NULL;
 	}
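
For context, a small sketch of what this means for request allocation; my_issue_request() is illustrative and error handling is reduced to the minimum:

/* Illustrative: blk_get_request() returns NULL for a dying queue, even
 * when the caller passes a __GFP_WAIT allocation such as GFP_KERNEL. */
static int my_issue_request(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)			/* out of memory or queue dying */
		return -ENODEV;
	/* ... fill in and dispatch rq ... */
	blk_put_request(rq);
	return 0;
}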
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		return -ENODEV;
 	}
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
-	 * Don't mess with dead queue.
+	 * Don't mess with a dying queue.
 	 */
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock(q->queue_lock);
 		return;
 	}
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * Short-circuit if @q is dead
 		 */
-		if (unlikely(blk_queue_dead(q))) {
+		if (unlikely(blk_queue_dying(q))) {
 			__blk_end_request_all(rq, -ENODEV);
 			continue;
 		}
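
A brief, hedged sketch of the plugging path the two hunks above guard; my_plugged_read() is a made-up wrapper around the standard plug API:

/* Requests queued on the plug are dispatched at blk_finish_plug() time,
 * or completed with -ENODEV if their queue went dying in the meantime. */
static void my_plugged_read(struct bio *bio)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	submit_bio(READ, bio);		/* request lands on the per-task plug list */
	blk_finish_plug(&plug);		/* blk_flush_plug_list() runs here */
}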
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd0..4aec98df7ba5 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(blk_queue_dead(q))) {
+	if (unlikely(blk_queue_dying(q))) {
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
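
A sketch of how a caller of blk_execute_rq_nowait() observes this; my_end_io() and my_submit() are hypothetical names:

/* Illustrative completion callback; a dying queue completes with -ENXIO. */
static void my_end_io(struct request *rq, int error)
{
	if (error)
		pr_debug("request failed: %d\n", error);
	blk_put_request(rq);
}

static void my_submit(struct request_queue *q, struct request *rq)
{
	/* rq prepared elsewhere; NULL bd_disk and at_head == 0 for simplicity */
	blk_execute_rq_nowait(q, NULL, rq, 0, my_end_io);
}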
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce6204608822..788147797a79 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (blk_queue_dead(q)) {
+	if (blk_queue_dying(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b609..31146225f3d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		/* if %NULL and @q is alive, fall back to root_tg */
 		if (!IS_ERR(blkg))
 			tg = blkg_to_tg(blkg);
-		else if (!blk_queue_dead(q))
+		else if (!blk_queue_dying(q))
 			tg = td_root_tg(td);
 	}
 
diff --git a/block/blk.h b/block/blk.h
index ca51543b248c..2218a8a78292 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (unlikely(blk_queue_dead(q)) ||
+		if (unlikely(blk_queue_dying(q)) ||
 		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index da36a3a81a9e..f29a1a9b54d2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1406,7 +1406,7 @@ static int scsi_lld_busy(struct request_queue *q)
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 
-	if (blk_queue_dead(q))
+	if (blk_queue_dying(q))
 		return 0;
 
 	shost = sdev->host;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..aba8246afe72 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -437,7 +437,7 @@ struct request_queue {
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
-#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
+#define QUEUE_FLAG_DYING	5	/* queue being torn down */
 #define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
@@ -521,7 +521,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
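
To close the loop, a minimal sketch of the renamed flag in use; it only exercises the helpers defined above, and the surrounding function is made up:

/* QUEUE_FLAG_DYING keeps bit 5, so the rename changes no behaviour.
 * my_mark_and_check() is a made-up demo of the two accessors above. */
static void my_mark_and_check(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);	/* queue_flag_set() wants queue_lock */
	spin_unlock_irq(q->queue_lock);

	if (blk_queue_dying(q))			/* test_bit() on the same bit */
		pr_debug("queue is being torn down\n");
}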