diff options
| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 02:57:56 -0400 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-08-17 03:01:08 -0400 |
| commit | a22ddff8bedfe33eeb1330bbb7ef1fbe007a42c4 (patch) | |
| tree | 61a2eb7fa62f5af10c2b913ca429e6b068b0eb2d /include/linux/blkdev.h | |
| parent | 20d5a540e55a29daeef12706f9ee73baf5641c16 (diff) | |
| parent | d9875690d9b89a866022ff49e3fcea892345ad92 (diff) | |
Merge tag 'v3.6-rc2' into drm-intel-next
Backmerge Linux 3.6-rc2 to resolve a few funny conflicts before we put
even more madness on top:
- drivers/gpu/drm/i915/i915_irq.c: Just a spurious WARN removed in
-fixes, which has been changed in a variable-rename in -next, too.
- drivers/gpu/drm/i915/intel_ringbuffer.c: -next remove scratch_addr
(since all their users have been extracted in another function),
-fixes added another user for a hw workaround.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 62 |
1 file changed, 38 insertions, 24 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ba43f408baa3..4e72a9d48232 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -46,16 +46,23 @@ struct blkcg_gq; | |||
| 46 | struct request; | 46 | struct request; |
| 47 | typedef void (rq_end_io_fn)(struct request *, int); | 47 | typedef void (rq_end_io_fn)(struct request *, int); |
| 48 | 48 | ||
| 49 | #define BLK_RL_SYNCFULL (1U << 0) | ||
| 50 | #define BLK_RL_ASYNCFULL (1U << 1) | ||
| 51 | |||
| 49 | struct request_list { | 52 | struct request_list { |
| 53 | struct request_queue *q; /* the queue this rl belongs to */ | ||
| 54 | #ifdef CONFIG_BLK_CGROUP | ||
| 55 | struct blkcg_gq *blkg; /* blkg this request pool belongs to */ | ||
| 56 | #endif | ||
| 50 | /* | 57 | /* |
| 51 | * count[], starved[], and wait[] are indexed by | 58 | * count[], starved[], and wait[] are indexed by |
| 52 | * BLK_RW_SYNC/BLK_RW_ASYNC | 59 | * BLK_RW_SYNC/BLK_RW_ASYNC |
| 53 | */ | 60 | */ |
| 54 | int count[2]; | 61 | int count[2]; |
| 55 | int starved[2]; | 62 | int starved[2]; |
| 56 | int elvpriv; | 63 | mempool_t *rq_pool; |
| 57 | mempool_t *rq_pool; | 64 | wait_queue_head_t wait[2]; |
| 58 | wait_queue_head_t wait[2]; | 65 | unsigned int flags; |
| 59 | }; | 66 | }; |
| 60 | 67 | ||
| 61 | /* | 68 | /* |
| @@ -138,6 +145,7 @@ struct request { | |||
| 138 | struct hd_struct *part; | 145 | struct hd_struct *part; |
| 139 | unsigned long start_time; | 146 | unsigned long start_time; |
| 140 | #ifdef CONFIG_BLK_CGROUP | 147 | #ifdef CONFIG_BLK_CGROUP |
| 148 | struct request_list *rl; /* rl this rq is alloced from */ | ||
| 141 | unsigned long long start_time_ns; | 149 | unsigned long long start_time_ns; |
| 142 | unsigned long long io_start_time_ns; /* when passed to hardware */ | 150 | unsigned long long io_start_time_ns; /* when passed to hardware */ |
| 143 | #endif | 151 | #endif |
| @@ -282,11 +290,16 @@ struct request_queue { | |||
| 282 | struct list_head queue_head; | 290 | struct list_head queue_head; |
| 283 | struct request *last_merge; | 291 | struct request *last_merge; |
| 284 | struct elevator_queue *elevator; | 292 | struct elevator_queue *elevator; |
| 293 | int nr_rqs[2]; /* # allocated [a]sync rqs */ | ||
| 294 | int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ | ||
| 285 | 295 | ||
| 286 | /* | 296 | /* |
| 287 | * the queue request freelist, one for reads and one for writes | 297 | * If blkcg is not used, @q->root_rl serves all requests. If blkcg |
| 298 | * is used, root blkg allocates from @q->root_rl and all other | ||
| 299 | * blkgs from their own blkg->rl. Which one to use should be | ||
| 300 | * determined using bio_request_list(). | ||
| 288 | */ | 301 | */ |
| 289 | struct request_list rq; | 302 | struct request_list root_rl; |
| 290 | 303 | ||
| 291 | request_fn_proc *request_fn; | 304 | request_fn_proc *request_fn; |
| 292 | make_request_fn *make_request_fn; | 305 | make_request_fn *make_request_fn; |
| @@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq) | |||
| 561 | return rw_is_sync(rq->cmd_flags); | 574 | return rw_is_sync(rq->cmd_flags); |
| 562 | } | 575 | } |
| 563 | 576 | ||
| 564 | static inline int blk_queue_full(struct request_queue *q, int sync) | 577 | static inline bool blk_rl_full(struct request_list *rl, bool sync) |
| 565 | { | 578 | { |
| 566 | if (sync) | 579 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
| 567 | return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); | 580 | |
| 568 | return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); | 581 | return rl->flags & flag; |
| 569 | } | 582 | } |
| 570 | 583 | ||
| 571 | static inline void blk_set_queue_full(struct request_queue *q, int sync) | 584 | static inline void blk_set_rl_full(struct request_list *rl, bool sync) |
| 572 | { | 585 | { |
| 573 | if (sync) | 586 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
| 574 | queue_flag_set(QUEUE_FLAG_SYNCFULL, q); | 587 | |
| 575 | else | 588 | rl->flags |= flag; |
| 576 | queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); | ||
| 577 | } | 589 | } |
| 578 | 590 | ||
| 579 | static inline void blk_clear_queue_full(struct request_queue *q, int sync) | 591 | static inline void blk_clear_rl_full(struct request_list *rl, bool sync) |
| 580 | { | 592 | { |
| 581 | if (sync) | 593 | unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL; |
| 582 | queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); | 594 | |
| 583 | else | 595 | rl->flags &= ~flag; |
| 584 | queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); | ||
| 585 | } | 596 | } |
| 586 | 597 | ||
| 587 | 598 | ||
| @@ -827,7 +838,6 @@ extern bool __blk_end_request_err(struct request *rq, int error); | |||
| 827 | extern void blk_complete_request(struct request *); | 838 | extern void blk_complete_request(struct request *); |
| 828 | extern void __blk_complete_request(struct request *); | 839 | extern void __blk_complete_request(struct request *); |
| 829 | extern void blk_abort_request(struct request *); | 840 | extern void blk_abort_request(struct request *); |
| 830 | extern void blk_abort_queue(struct request_queue *); | ||
| 831 | extern void blk_unprep_request(struct request *); | 841 | extern void blk_unprep_request(struct request *); |
| 832 | 842 | ||
| 833 | /* | 843 | /* |
| @@ -912,11 +922,15 @@ struct blk_plug { | |||
| 912 | }; | 922 | }; |
| 913 | #define BLK_MAX_REQUEST_COUNT 16 | 923 | #define BLK_MAX_REQUEST_COUNT 16 |
| 914 | 924 | ||
| 925 | struct blk_plug_cb; | ||
| 926 | typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); | ||
| 915 | struct blk_plug_cb { | 927 | struct blk_plug_cb { |
| 916 | struct list_head list; | 928 | struct list_head list; |
| 917 | void (*callback)(struct blk_plug_cb *); | 929 | blk_plug_cb_fn callback; |
| 930 | void *data; | ||
| 918 | }; | 931 | }; |
| 919 | 932 | extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, | |
| 933 | void *data, int size); | ||
| 920 | extern void blk_start_plug(struct blk_plug *); | 934 | extern void blk_start_plug(struct blk_plug *); |
| 921 | extern void blk_finish_plug(struct blk_plug *); | 935 | extern void blk_finish_plug(struct blk_plug *); |
| 922 | extern void blk_flush_plug_list(struct blk_plug *, bool); | 936 | extern void blk_flush_plug_list(struct blk_plug *, bool); |
