author     Joe Lawrence <joe.lawrence@stratus.com>	2014-08-28 10:15:21 -0400
committer  Jens Axboe <axboe@fb.com>	2014-08-28 12:03:46 -0400
commit     a492f075450f3ba87de36e5ffe92a9d0c7af9723
tree       61960a71c7fde0eee3d77cda460154d2f7715d2f /block/blk-core.c
parent     eb571eeade2598635f813b3284d02c13a380301e
block,scsi: fixup blk_get_request dead queue scenarios
The blk_get_request function may fail in low-memory conditions or during
device removal (even if __GFP_WAIT is set). To distinguish between these
errors, modify the blk_get_request call stack to return the appropriate
ERR_PTR. Update all callers to check the return status with IS_ERR
instead of a simple NULL pointer check.
For consistency, make a similar change to the blk_mq_alloc_request leg
of blk_get_request. It may fail if the queue is dead, or the caller was
unwilling to wait.
Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Kosina <jkosina@suse.cz> [for pktdvd]
Acked-by: Boaz Harrosh <bharrosh@panasas.com> [for osd]
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
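
For readers outside the kernel tree, here is a minimal, self-contained
userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention the patch moves
to. alloc_request() and its queue_dying flag are hypothetical stand-ins for
blk_get_request() and blk_queue_dying(); the three helpers are simplified
copies of the ones in include/linux/err.h.

	/* Sketch of the kernel's pointer-encoded errno convention.
	 * Small negative errnos live at the very top of the address
	 * range, so one return value can carry either a valid pointer
	 * or a specific error code. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <errno.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	struct request { int tag; };

	/* Hypothetical allocator mirroring __get_request() after this
	 * patch: -ENODEV when the "queue" is dead, -ENOMEM when the
	 * allocation itself fails. */
	static struct request *alloc_request(int queue_dying)
	{
		struct request *rq;

		if (queue_dying)
			return ERR_PTR(-ENODEV);

		rq = malloc(sizeof(*rq));
		if (!rq)
			return ERR_PTR(-ENOMEM);
		rq->tag = 1;
		return rq;
	}

	int main(void)
	{
		/* Dead queue: the caller sees -ENODEV, not a bare NULL. */
		struct request *rq = alloc_request(1);

		if (IS_ERR(rq))
			printf("dead queue: error %ld\n", PTR_ERR(rq));

		/* Healthy queue: the same call site handles a real pointer. */
		rq = alloc_request(0);
		if (!IS_ERR(rq)) {
			printf("got request, tag %d\n", rq->tag);
			free(rq);
		}
		return 0;
	}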
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c359d72e9d76..93603e6ff479 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -933,9 +933,9 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Get a free request from @q. This function may fail under memory
  * pressure or if @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, int rw_flags,
 				     struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +949,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +974,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 				 * process is not a "batcher", and not
 				 * exempted by the IO scheduler
 				 */
-				return NULL;
+				return ERR_PTR(-ENOMEM);
 			}
 		}
 	}
@@ -992,7 +992,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	q->nr_rqs[is_sync]++;
 	rl->count[is_sync]++;
@@ -1097,7 +1097,7 @@ fail_alloc:
 rq_starved:
 	if (unlikely(rl->count[is_sync] == 0))
 		rl->starved[is_sync] = 1;
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -1110,9 +1110,9 @@ rq_starved:
  * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
  * function keeps retrying under memory pressure and fails iff @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1125,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
 	rq = __get_request(rl, rw_flags, bio, gfp_mask);
-	if (rq)
+	if (!IS_ERR(rq))
 		return rq;
 
 	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
-		return NULL;
+		return rq;
 	}
 
 	/* wait on @rl and retry */
@@ -1167,7 +1167,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, rw, NULL, gfp_mask);
-	if (!rq)
+	if (IS_ERR(rq))
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
 
@@ -1219,8 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 {
 	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 
-	if (unlikely(!rq))
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(rq))
+		return rq;
 
 	blk_rq_set_block_pc(rq);
 
@@ -1615,8 +1615,8 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request(q, rw_flags, bio, GFP_NOIO);
-	if (unlikely(!req)) {
-		bio_endio(bio, -ENODEV);	/* @q is dead */
+	if (IS_ERR(req)) {
+		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
 		goto out_unlock;
 	}
 
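
A detail worth noting in the rewritten get_request(): the non-wait failure
branch returns rq itself rather than a fresh error code, so whichever
ERR_PTR __get_request() produced (-ENODEV for a dying queue, -ENOMEM
otherwise) propagates to the caller unchanged. blk_make_request() forwards
it the same way, and the bio submission path now completes the bio with
PTR_ERR(req) instead of the old unconditional -ENODEV, letting callers
distinguish a dead device from memory pressure.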