Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	61
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 07954b05b86c..4e72a9d48232 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,16 +46,23 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL		(1U << 0)
+#define BLK_RL_ASYNCFULL	(1U << 1)
+
 struct request_list {
+	struct request_queue *q;	/* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+	struct blkcg_gq *blkg;		/* blkg this request pool belongs to */
+#endif
 	/*
 	 * count[], starved[], and wait[] are indexed by
 	 * BLK_RW_SYNC/BLK_RW_ASYNC
 	 */
 	int count[2];
 	int starved[2];
-	int elvpriv;
 	mempool_t *rq_pool;
 	wait_queue_head_t wait[2];
+	unsigned int flags;
 };
 
 /*
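Note: with the @q back-pointer, the per-rl mempool, and the new flags word, a request_list can now be instantiated per blkg rather than once per queue. A minimal initialization sketch, assuming a slab cache of struct request as in block/blk-core.c (the function and cache names here are illustrative, not part of this header):

#include <linux/blkdev.h>
#include <linux/mempool.h>

/* Sketch only: set up one request_list against the queue it serves. */
static int rl_init_sketch(struct request_list *rl, struct request_queue *q,
			  struct kmem_cache *rq_cachep)
{
	rl->q = q;
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->flags = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	/* each request_list gets its own pool of struct request */
	rl->rq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, rq_cachep);
	return rl->rq_pool ? 0 : -ENOMEM;
}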
@@ -138,6 +145,7 @@ struct request {
 	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
+	struct request_list *rl;	/* rl this rq is alloced from */
 	unsigned long long start_time_ns;
 	unsigned long long io_start_time_ns;  /* when passed to hardware */
 #endif
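Note: the new rq->rl back-pointer records which request_list a request was allocated from, so the free path can return it to the owning per-blkg pool instead of assuming the queue-wide one. A hedged sketch of that lookup (helper names are illustrative):

#include <linux/blkdev.h>

/* Sketch only: find the request_list that owns @rq. */
static inline struct request_list *rq_rl_sketch(struct request_queue *q,
						struct request *rq)
{
#ifdef CONFIG_BLK_CGROUP
	return rq->rl;
#else
	return &q->root_rl;
#endif
}

/* Sketch only: on free, give the request back to the pool it came from. */
static void rq_free_sketch(struct request_queue *q, struct request *rq)
{
	mempool_free(rq, rq_rl_sketch(q, rq)->rq_pool);
}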
@@ -282,11 +290,16 @@ struct request_queue {
 	struct list_head queue_head;
 	struct request *last_merge;
 	struct elevator_queue *elevator;
+	int nr_rqs[2];		/* # allocated [a]sync rqs */
+	int nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
 	/*
-	 * the queue request freelist, one for reads and one for writes
+	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
+	 * is used, root blkg allocates from @q->root_rl and all other
+	 * blkgs from their own blkg->rl.  Which one to use should be
+	 * determined using bio_request_list().
 	 */
-	struct request_list rq;
+	struct request_list root_rl;
 
 	request_fn_proc *request_fn;
 	make_request_fn *make_request_fn;
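Note: this hunk is the heart of the change: the single q->rq freelist becomes q->root_rl, and with blkcg in use each non-root blkg carries its own request_list. A hedged sketch of the selection the new comment describes; the real bio_request_list() lives outside this header and resolves the bio's blkg itself, which is abstracted away as a parameter here:

#include <linux/blkdev.h>

/*
 * Sketch only: @blkg_rl is the rl of the blkg the bio belongs to, or
 * NULL for the root blkg and for !CONFIG_BLK_CGROUP builds.
 */
static inline struct request_list *
bio_request_list_sketch(struct request_queue *q, struct request_list *blkg_rl)
{
	/* non-root blkgs allocate from their own pool ... */
	if (blkg_rl)
		return blkg_rl;
	/* ... everything else shares the queue's root_rl */
	return &q->root_rl;
}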
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags &= ~flag;
 }
 
 
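Note: full/not-full state thus moves from the queue-wide QUEUE_FLAG_SYNCFULL/QUEUE_FLAG_ASYNCFULL bits into each request_list's flags word, so one congested blkg no longer marks the whole queue full. A hedged usage sketch of the completion side, modeled loosely on the freed_request() pattern in block/blk-core.c (the function name and threshold check are illustrative):

#include <linux/blkdev.h>

/* Sketch only: a request in direction @sync completed on @rl. */
static void rl_freed_request_sketch(struct request_list *rl, bool sync)
{
	struct request_queue *q = rl->q;

	rl->count[sync]--;

	/* back under the queue's limit: this rl is no longer full */
	if (rl->count[sync] + 1 <= q->nr_requests)
		blk_clear_rl_full(rl, sync);

	/* let any allocator sleeping on this rl retry */
	if (waitqueue_active(&rl->wait[sync]))
		wake_up(&rl->wait[sync]);
}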
@@ -911,11 +922,15 @@ struct blk_plug {
 };
 #define BLK_MAX_REQUEST_COUNT 16
 
+struct blk_plug_cb;
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
 struct blk_plug_cb {
 	struct list_head list;
-	void (*callback)(struct blk_plug_cb *);
+	blk_plug_cb_fn callback;
+	void *data;
 };
-
+extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
+					     void *data, int size);
 extern void blk_start_plug(struct blk_plug *);
 extern void blk_finish_plug(struct blk_plug *);
 extern void blk_flush_plug_list(struct blk_plug *, bool);
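Note: blk_check_plugged() extends per-task plugging to drivers without a request_fn: it looks up the current task's plug for a callback matching (@unplug, @data), allocates a @size-byte one on first use, and returns NULL when the task is not plugged. A hedged usage sketch in the style of its eventual callers (the my_* names and the batched counter are illustrative):

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Sketch only: driver-private state hung off a plug callback. */
struct my_plug_cb {
	struct blk_plug_cb cb;		/* recovered via container_of() */
	unsigned int batched;		/* work deferred while plugged */
};

/* Runs at unplug; @from_schedule is true when unplugged by the scheduler. */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mcb = container_of(cb, struct my_plug_cb, cb);

	pr_debug("flushing %u batched units\n", mcb->batched);
	/* kick the work that was batched while the task was plugged */
}

/* Returns true if the work was deferred to unplug time. */
static bool my_try_batch(void *dev)
{
	struct blk_plug_cb *cb;

	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
	if (!cb)
		return false;		/* not plugged: caller submits directly */

	container_of(cb, struct my_plug_cb, cb)->batched++;
	return true;			/* my_unplug() will run on unplug */
}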