 block/blk-core.c       | 6 +++---
 block/blk-exec.c       | 2 +-
 block/blk-sysfs.c      | 4 ++--
 block/blk-throttle.c   | 4 ++--
 block/blk.h            | 2 +-
 include/linux/blkdev.h | 1 +
 6 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 435af2378614..b5ed4f4a8d96 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -608,7 +608,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue_node);
 
 int blk_get_queue(struct request_queue *q)
 {
-	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (likely(!blk_queue_dead(q))) {
 		kobject_get(&q->kobj);
 		return 0;
 	}
@@ -755,7 +755,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
@@ -875,7 +875,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 	struct io_context *ioc;
 	struct request_list *rl = &q->rq;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
diff --git a/block/blk-exec.c b/block/blk-exec.c
index a1ebceb332f9..60532852b3ab 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -50,7 +50,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e7f9f657f105..f0b2ca8f66d0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -425,7 +425,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 	if (!entry->show)
 		return -EIO;
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
@@ -447,7 +447,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
 	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
-	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+	if (blk_queue_dead(q)) {
 		mutex_unlock(&q->sysfs_lock);
 		return -ENOENT;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 4553245d9317..5eed6a76721d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -310,7 +310,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	struct request_queue *q = td->queue;
 
 	/* no throttling for dead queue */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+	if (unlikely(blk_queue_dead(q)))
 		return NULL;
 
 	rcu_read_lock();
@@ -335,7 +335,7 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_lock_irq(q->queue_lock);
 
 	/* Make sure @q is still alive */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+	if (unlikely(blk_queue_dead(q))) {
 		kfree(tg);
 		return NULL;
 	}
diff --git a/block/blk.h b/block/blk.h
index 3f6551b3c92d..e38691dbb329 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -85,7 +85,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			q->flush_queue_delayed = 1;
 			return NULL;
 		}
-		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags) ||
+		if (unlikely(blk_queue_dead(q)) ||
 		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8a6b51b13a1c..783f97c14d0a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
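The change is mechanical: every open-coded test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)
check above becomes a call to the new blk_queue_dead(q) macro, matching the existing
blk_queue_tagged()/blk_queue_stopped() helpers. As a minimal illustration of the pattern,
here is a self-contained userspace sketch, not kernel code: test_bit is a simplified,
non-atomic stand-in, the bit position used for QUEUE_FLAG_DEAD is assumed for the
example, and blk_get_queue only mirrors the shape of the converted kernel function.

/*
 * Minimal userspace sketch of the flag-test-macro pattern this patch
 * introduces. Names mirror the kernel's, but the helpers are stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

/* Simplified, non-atomic stand-in for the kernel's test_bit(). */
static bool test_bit(unsigned int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

#define QUEUE_FLAG_DEAD	5	/* bit position assumed for this sketch */

struct request_queue {
	unsigned long queue_flags;
	int refcount;
};

/* The convention the patch adds: one named macro per flag test. */
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)

/* Mirrors the shape of blk_get_queue() after the conversion. */
static int blk_get_queue(struct request_queue *q)
{
	if (!blk_queue_dead(q)) {
		q->refcount++;	/* stand-in for kobject_get() */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct request_queue q = { .queue_flags = 0, .refcount = 0 };

	printf("live queue: blk_get_queue() = %d\n", blk_get_queue(&q));

	q.queue_flags |= 1UL << QUEUE_FLAG_DEAD;	/* mark the queue dead */
	printf("dead queue: blk_get_queue() = %d\n", blk_get_queue(&q));
	return 0;
}

Built with any C compiler (e.g. cc sketch.c), the first call succeeds and returns 0;
once the DEAD bit is set, blk_queue_dead() refuses the reference, mirroring the intent
of the blk_get_queue() hunk above while keeping every call site a single readable test.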