about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
author: Joe Lawrence <joe.lawrence@stratus.com> 2014-08-28 10:15:21 -0400
committer: Jens Axboe <axboe@fb.com> 2014-08-28 12:03:46 -0400
commit a492f075450f3ba87de36e5ffe92a9d0c7af9723 (patch)
tree 61960a71c7fde0eee3d77cda460154d2f7715d2f /block
parent eb571eeade2598635f813b3284d02c13a380301e (diff)
block,scsi: fixup blk_get_request dead queue scenarios
The blk_get_request function may fail in low-memory conditions or during
device removal (even if __GFP_WAIT is set). To distinguish between these
errors, modify the blk_get_request call stack to return the appropriate
ERR_PTR. Verify that all callers check the return status and consider
IS_ERR instead of a simple NULL pointer check.

For consistency, make a similar change to the blk_mq_alloc_request leg of
blk_get_request. It may fail if the queue is dead, or the caller was
unwilling to wait.

Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
Acked-by: Jiri Kosina <jkosina@suse.cz> [for pktdvd]
Acked-by: Boaz Harrosh <bharrosh@panasas.com> [for osd]
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c34
-rw-r--r--block/blk-mq.c8
-rw-r--r--block/bsg.c8
-rw-r--r--block/scsi_ioctl.c12
4 files changed, 33 insertions(+), 29 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c359d72e9d76..93603e6ff479 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -933,9 +933,9 @@ static struct io_context *rq_ioc(struct bio *bio)
933 * Get a free request from @q. This function may fail under memory 933 * Get a free request from @q. This function may fail under memory
934 * pressure or if @q is dead. 934 * pressure or if @q is dead.
935 * 935 *
936 * Must be callled with @q->queue_lock held and, 936 * Must be called with @q->queue_lock held and,
937 * Returns %NULL on failure, with @q->queue_lock held. 937 * Returns ERR_PTR on failure, with @q->queue_lock held.
938 * Returns !%NULL on success, with @q->queue_lock *not held*. 938 * Returns request pointer on success, with @q->queue_lock *not held*.
939 */ 939 */
940static struct request *__get_request(struct request_list *rl, int rw_flags, 940static struct request *__get_request(struct request_list *rl, int rw_flags,
941 struct bio *bio, gfp_t gfp_mask) 941 struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +949,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
949 int may_queue; 949 int may_queue;
950 950
951 if (unlikely(blk_queue_dying(q))) 951 if (unlikely(blk_queue_dying(q)))
952 return NULL; 952 return ERR_PTR(-ENODEV);
953 953
954 may_queue = elv_may_queue(q, rw_flags); 954 may_queue = elv_may_queue(q, rw_flags);
955 if (may_queue == ELV_MQUEUE_NO) 955 if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +974,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
974 * process is not a "batcher", and not 974 * process is not a "batcher", and not
975 * exempted by the IO scheduler 975 * exempted by the IO scheduler
976 */ 976 */
977 return NULL; 977 return ERR_PTR(-ENOMEM);
978 } 978 }
979 } 979 }
980 } 980 }
@@ -992,7 +992,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
992 * allocated with any setting of ->nr_requests 992 * allocated with any setting of ->nr_requests
993 */ 993 */
994 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 994 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
995 return NULL; 995 return ERR_PTR(-ENOMEM);
996 996
997 q->nr_rqs[is_sync]++; 997 q->nr_rqs[is_sync]++;
998 rl->count[is_sync]++; 998 rl->count[is_sync]++;
@@ -1097,7 +1097,7 @@ fail_alloc:
1097rq_starved: 1097rq_starved:
1098 if (unlikely(rl->count[is_sync] == 0)) 1098 if (unlikely(rl->count[is_sync] == 0))
1099 rl->starved[is_sync] = 1; 1099 rl->starved[is_sync] = 1;
1100 return NULL; 1100 return ERR_PTR(-ENOMEM);
1101} 1101}
1102 1102
1103/** 1103/**
@@ -1110,9 +1110,9 @@ rq_starved:
1110 * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this 1110 * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
1111 * function keeps retrying under memory pressure and fails iff @q is dead. 1111 * function keeps retrying under memory pressure and fails iff @q is dead.
1112 * 1112 *
1113 * Must be callled with @q->queue_lock held and, 1113 * Must be called with @q->queue_lock held and,
1114 * Returns %NULL on failure, with @q->queue_lock held. 1114 * Returns ERR_PTR on failure, with @q->queue_lock held.
1115 * Returns !%NULL on success, with @q->queue_lock *not held*. 1115 * Returns request pointer on success, with @q->queue_lock *not held*.
1116 */ 1116 */
1117static struct request *get_request(struct request_queue *q, int rw_flags, 1117static struct request *get_request(struct request_queue *q, int rw_flags,
1118 struct bio *bio, gfp_t gfp_mask) 1118 struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1125,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
1125 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ 1125 rl = blk_get_rl(q, bio); /* transferred to @rq on success */
1126retry: 1126retry:
1127 rq = __get_request(rl, rw_flags, bio, gfp_mask); 1127 rq = __get_request(rl, rw_flags, bio, gfp_mask);
1128 if (rq) 1128 if (!IS_ERR(rq))
1129 return rq; 1129 return rq;
1130 1130
1131 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { 1131 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
1132 blk_put_rl(rl); 1132 blk_put_rl(rl);
1133 return NULL; 1133 return rq;
1134 } 1134 }
1135 1135
1136 /* wait on @rl and retry */ 1136 /* wait on @rl and retry */
@@ -1167,7 +1167,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
1167 1167
1168 spin_lock_irq(q->queue_lock); 1168 spin_lock_irq(q->queue_lock);
1169 rq = get_request(q, rw, NULL, gfp_mask); 1169 rq = get_request(q, rw, NULL, gfp_mask);
1170 if (!rq) 1170 if (IS_ERR(rq))
1171 spin_unlock_irq(q->queue_lock); 1171 spin_unlock_irq(q->queue_lock);
1172 /* q->queue_lock is unlocked at this point */ 1172 /* q->queue_lock is unlocked at this point */
1173 1173
@@ -1219,8 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1219{ 1219{
1220 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); 1220 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1221 1221
1222 if (unlikely(!rq)) 1222 if (IS_ERR(rq))
1223 return ERR_PTR(-ENOMEM); 1223 return rq;
1224 1224
1225 blk_rq_set_block_pc(rq); 1225 blk_rq_set_block_pc(rq);
1226 1226
@@ -1615,8 +1615,8 @@ get_rq:
1615 * Returns with the queue unlocked. 1615 * Returns with the queue unlocked.
1616 */ 1616 */
1617 req = get_request(q, rw_flags, bio, GFP_NOIO); 1617 req = get_request(q, rw_flags, bio, GFP_NOIO);
1618 if (unlikely(!req)) { 1618 if (IS_ERR(req)) {
1619 bio_endio(bio, -ENODEV); /* @q is dead */ 1619 bio_endio(bio, PTR_ERR(req)); /* @q is dead */
1620 goto out_unlock; 1620 goto out_unlock;
1621 } 1621 }
1622 1622
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5189cb1e478a..940aa8a34b70 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -218,9 +218,11 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
218 struct blk_mq_hw_ctx *hctx; 218 struct blk_mq_hw_ctx *hctx;
219 struct request *rq; 219 struct request *rq;
220 struct blk_mq_alloc_data alloc_data; 220 struct blk_mq_alloc_data alloc_data;
221 int ret;
221 222
222 if (blk_mq_queue_enter(q)) 223 ret = blk_mq_queue_enter(q);
223 return NULL; 224 if (ret)
225 return ERR_PTR(ret);
224 226
225 ctx = blk_mq_get_ctx(q); 227 ctx = blk_mq_get_ctx(q);
226 hctx = q->mq_ops->map_queue(q, ctx->cpu); 228 hctx = q->mq_ops->map_queue(q, ctx->cpu);
@@ -240,6 +242,8 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
240 ctx = alloc_data.ctx; 242 ctx = alloc_data.ctx;
241 } 243 }
242 blk_mq_put_ctx(ctx); 244 blk_mq_put_ctx(ctx);
245 if (!rq)
246 return ERR_PTR(-EWOULDBLOCK);
243 return rq; 247 return rq;
244} 248}
245EXPORT_SYMBOL(blk_mq_alloc_request); 249EXPORT_SYMBOL(blk_mq_alloc_request);
diff --git a/block/bsg.c b/block/bsg.c
index ff46addde5d8..73c78fd12cc1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -270,8 +270,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
270 * map scatter-gather elements separately and string them to request 270 * map scatter-gather elements separately and string them to request
271 */ 271 */
272 rq = blk_get_request(q, rw, GFP_KERNEL); 272 rq = blk_get_request(q, rw, GFP_KERNEL);
273 if (!rq) 273 if (IS_ERR(rq))
274 return ERR_PTR(-ENOMEM); 274 return rq;
275 blk_rq_set_block_pc(rq); 275 blk_rq_set_block_pc(rq);
276 276
277 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); 277 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
@@ -285,8 +285,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
285 } 285 }
286 286
287 next_rq = blk_get_request(q, READ, GFP_KERNEL); 287 next_rq = blk_get_request(q, READ, GFP_KERNEL);
288 if (!next_rq) { 288 if (IS_ERR(next_rq)) {
289 ret = -ENOMEM; 289 ret = PTR_ERR(next_rq);
290 goto out; 290 goto out;
291 } 291 }
292 rq->next_rq = next_rq; 292 rq->next_rq = next_rq;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 29d056782833..a8b0d0208448 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -318,8 +318,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
318 at_head = 1; 318 at_head = 1;
319 319
320 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); 320 rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
321 if (!rq) 321 if (IS_ERR(rq))
322 return -ENOMEM; 322 return PTR_ERR(rq);
323 blk_rq_set_block_pc(rq); 323 blk_rq_set_block_pc(rq);
324 324
325 if (blk_fill_sghdr_rq(q, rq, hdr, mode)) { 325 if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
@@ -448,8 +448,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
448 } 448 }
449 449
450 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); 450 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
451 if (!rq) { 451 if (IS_ERR(rq)) {
452 err = -ENODEV; 452 err = PTR_ERR(rq);
453 goto error_free_buffer; 453 goto error_free_buffer;
454 } 454 }
455 455
@@ -539,8 +539,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
539 int err; 539 int err;
540 540
541 rq = blk_get_request(q, WRITE, __GFP_WAIT); 541 rq = blk_get_request(q, WRITE, __GFP_WAIT);
542 if (!rq) 542 if (IS_ERR(rq))
543 return -ENODEV; 543 return PTR_ERR(rq);
544 blk_rq_set_block_pc(rq); 544 blk_rq_set_block_pc(rq);
545 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 545 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
546 rq->cmd[0] = cmd; 546 rq->cmd[0] = cmd;