author		Tejun Heo <tj@kernel.org>	2011-10-19 08:33:05 -0400
committer	Jens Axboe <axboe@kernel.dk>	2011-10-19 08:33:05 -0400
commit		da8303c63b8de73619884382d6e573d44aae0810 (patch)
tree		d8560fa6452c5a7583aa21a2e5505d68899e2df5 /block
parent		bc16a4f933bc5ed50826b20561e4c3515061998b (diff)
block: make get_request[_wait]() fail if queue is dead
Currently get_request[_wait]() allocates a request whether the queue is dead or not. This patch makes get_request[_wait]() return NULL if @q is dead. blk_queue_bio() is updated to fail the submitted bio if request allocation fails. While at it, add docbook comments for get_request[_wait]().

Note that the current code makes the rather unclear assumption (there are spurious DEAD tests scattered around) that the owner of a queue guarantees that no request travels through the block layer once the queue is dead. This patch by itself doesn't change much, but it allows fixing that broken assumption in the next patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
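For illustration only (not part of this patch; the function name below is made up for the example): once request allocation can fail because the queue is dead, callers of blk_get_request() have to handle a NULL return even for __GFP_WAIT allocations. A minimal caller-side sketch:

	/*
	 * Hypothetical caller sketch: with this patch, blk_get_request() can
	 * return NULL when the queue is dead, even if __GFP_WAIT was passed,
	 * so the result must be checked before the request is set up.
	 */
	static int example_issue_rq(struct request_queue *q)
	{
		struct request *rq;

		rq = blk_get_request(q, READ, GFP_KERNEL);	/* GFP_KERNEL includes __GFP_WAIT */
		if (!rq)
			return -ENODEV;		/* queue is dead (or allocation failed) */

		/* ... fill in and issue the request here ... */

		blk_put_request(rq);
		return 0;
	}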
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	54
1 file changed, 38 insertions(+), 16 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 6c491f2388e9..3508751c779a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -709,10 +709,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	return true;
 }
 
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q.  This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be called with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -723,6 +732,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue;
 
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
@@ -815,11 +827,18 @@ out:
 	return rq;
 }
 
-/*
- * No available requests for this queue, wait for some requests to become
- * available.
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
  *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Get a free request from @q.  This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
+ *
+ * Must be called with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
@@ -833,6 +852,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
+		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+			return NULL;
+
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 					  TASK_UNINTERRUPTIBLE);
 
@@ -863,19 +885,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		return NULL;
-
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT) {
+	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	} else {
+	else
 		rq = get_request(q, rw, NULL, gfp_mask);
-		if (!rq)
-			spin_unlock_irq(q->queue_lock);
-	}
+	if (!rq)
+		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
 
 	return rq;
@@ -1299,6 +1317,10 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request_wait(q, rw_flags, bio);
+	if (unlikely(!req)) {
+		bio_endio(bio, -ENODEV);	/* @q is dead */
+		goto out_unlock;
+	}
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request