diff options
Diffstat (limited to 'block/ll_rw_blk.c')
 block/ll_rw_blk.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b94a396aa624..b1ea941f6dc3 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2003,8 +2003,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
 }
 
 static inline struct request *
-blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
-		  int priv, gfp_t gfp_mask)
+blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -2018,7 +2017,7 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 	rq->cmd_flags = rw | REQ_ALLOCED;
 
 	if (priv) {
-		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
+		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
@@ -2109,7 +2108,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 	struct io_context *ioc = NULL;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw, bio);
+	may_queue = elv_may_queue(q, rw);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2157,7 +2156,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything