author     Tejun Heo <tj@kernel.org>       2011-10-19 08:31:22 -0400
committer  Jens Axboe <axboe@kernel.dk>    2011-10-19 08:31:22 -0400
commit     75eb6c372d41d6d140b893873f6687d78c987a44 (patch)
tree       51864b2efe0415ad30226f218bc56c1a505bfb75 /block
parent     bc9fcbf9cb8ec76d340da16fbf48a9a316e14c52 (diff)
block: pass around REQ_* flags instead of broken down booleans during request alloc/free
blk_alloc_request() and freed_request() take different combinations of REQ_* @flags, @priv and @is_sync, where @flags is a superset of the latter two. Make them take @flags only. This cleans up the code a bit and will ease updating allocation-related REQ_* flags. This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
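As an illustration of the calling-convention change (a minimal sketch, not the kernel code), the snippet below contrasts a callee that receives pre-broken-down booleans with one that receives a single flag word and derives what it needs. The EX_REQ_* bits and the freed_request_old()/freed_request_new() helpers are made-up stand-ins for REQ_SYNC, REQ_ELVPRIV and the block-layer functions touched by this patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag bits standing in for the kernel's REQ_SYNC / REQ_ELVPRIV. */
#define EX_REQ_SYNC     (1u << 0)
#define EX_REQ_ELVPRIV  (1u << 1)

/* Old style: the caller breaks the flag word down into separate booleans. */
static void freed_request_old(int sync, int priv)
{
	printf("old: sync=%d priv=%d\n", sync, priv);
}

/* New style: the caller passes the flag word; the callee derives what it needs. */
static void freed_request_new(unsigned int flags)
{
	bool sync = flags & EX_REQ_SYNC;
	bool priv = flags & EX_REQ_ELVPRIV;

	printf("new: sync=%d priv=%d\n", sync, priv);
}

int main(void)
{
	unsigned int flags = EX_REQ_SYNC | EX_REQ_ELVPRIV;

	/* Equivalent calls; the second keeps the flag word as the single source of truth. */
	freed_request_old(!!(flags & EX_REQ_SYNC), !!(flags & EX_REQ_ELVPRIV));
	freed_request_new(flags);
	return 0;
}

Keeping the information in one flag word means there is nothing to keep in sync between rq->cmd_flags and separately-passed booleans, which is what makes it easier to add further allocation-related REQ_* flags later, as the commit message notes.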
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  36
1 file changed, 17 insertions, 19 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 79e41a76d96a..a3d2fdc8ed1c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -574,7 +574,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -585,12 +585,10 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if (priv) {
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		rq->cmd_flags |= REQ_ELVPRIV;
+	if ((flags & REQ_ELVPRIV) &&
+	    unlikely(elv_set_request(q, rq, gfp_mask))) {
+		mempool_free(rq, q->rq.rq_pool);
+		return NULL;
 	}
 
 	return rq;
@@ -649,12 +647,13 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, unsigned int flags)
 {
 	struct request_list *rl = &q->rq;
+	int sync = rw_is_sync(flags);
 
 	rl->count[sync]--;
-	if (priv)
+	if (flags & REQ_ELVPRIV)
 		rl->elvpriv--;
 
 	__freed_request(q, sync);
@@ -694,7 +693,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	int may_queue, priv = 0;
+	int may_queue;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -738,17 +737,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
-	if (blk_rq_should_init_elevator(bio)) {
-		priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-		if (priv)
-			rl->elvpriv++;
+	if (blk_rq_should_init_elevator(bio) &&
+	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+		rw_flags |= REQ_ELVPRIV;
+		rl->elvpriv++;
 	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -758,7 +757,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw_flags);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -1050,14 +1049,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
-		int priv = req->cmd_flags & REQ_ELVPRIV;
+		unsigned int flags = req->cmd_flags;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);