author	Tejun Heo <tj@kernel.org>	2012-06-04 23:40:59 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-06-25 05:53:52 -0400
commit	5b788ce3e2acac9bf109743b1281d77347cf2101 (patch)
tree	907477e42d27bec9a2060fcc709402b7636390c9 /include
parent	8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d (diff)
block: prepare for multiple request_lists
Request allocation is about to be made per-blkg, meaning that there'll be multiple request lists.

* Make queue full state per request_list. blk_*queue_full() functions are renamed to blk_*rl_full() and take @rl instead of @q.

* Rename blk_init_free_list() to blk_init_rl() and make it take @rl instead of @q. Also add @gfp_mask parameter.

* Add blk_exit_rl() instead of destroying rl directly from blk_release_queue().

* Add request_list->q and make request alloc/free functions - blk_free_request(), [__]freed_request(), __get_request() - take @rl instead of @q.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
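As a rough, self-contained illustration of the per-request_list full-state helpers this patch introduces, the following user-space model mirrors the header hunks below. It is not kernel code: the struct is trimmed down to just the flags word, and the main() driver exists only to demonstrate how the sync/async bits are set, tested, and cleared.

#include <stdbool.h>
#include <stdio.h>

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

/* trimmed-down stand-in for the kernel's struct request_list */
struct request_list {
	unsigned int flags;	/* BLK_RL_SYNCFULL | BLK_RL_ASYNCFULL */
};

static bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

int main(void)
{
	struct request_list rl = { .flags = 0 };

	blk_set_rl_full(&rl, true);	/* mark the sync list full */
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* prints 1 */
	printf("async full: %d\n", blk_rl_full(&rl, false));	/* prints 0 */
	blk_clear_rl_full(&rl, true);
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* prints 0 */
	return 0;
}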
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blkdev.h	32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 7e44ed93f84b..f2385ee7c7b2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,7 +46,12 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL		(1U << 0)
+#define BLK_RL_ASYNCFULL	(1U << 1)
+
 struct request_list {
+	struct request_queue	*q;	/* the queue this rl belongs to */
+
 	/*
 	 * count[], starved[], and wait[] are indexed by
 	 * BLK_RW_SYNC/BLK_RW_ASYNC
@@ -55,6 +60,7 @@ struct request_list {
 	int starved[2];
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
+	unsigned int		flags;
 };
 
 /*
@@ -562,27 +568,25 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags &= ~flag;
 }
 
 
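One detail visible in the hunk above: the old helpers tested and toggled atomic bits in q->queue_flags via test_bit() and queue_flag_set()/queue_flag_clear(), while the replacements manipulate rl->flags with plain, non-atomic bitwise operations. Presumably the full-state updates remain serialized by the queue lock (as the queue_flag_* variants already required of their callers), so the per-list flags do not need atomic accessors.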