| author | Tejun Heo <tj@kernel.org> | 2011-01-25 06:43:54 -0500 |
| --- | --- | --- |
| committer | Jens Axboe <jaxboe@fusionio.com> | 2011-01-25 06:43:54 -0500 |
| commit | ae1b1539622fb46e51b4d13b3f9e5f4c713f86ae (patch) | |
| tree | c5cb540141003a3ec7ebf0b8c6e01653ab6aaef5 /block/blk.h | |
| parent | 143a87f4c9c629067afea5b6703d66ea88c82f8e (diff) | |
block: reimplement FLUSH/FUA to support merge
The current FLUSH/FUA support has evolved from the implementation
which had to perform queue draining. As such, sequencing is done
queue-wide one flush request after another. However, with the
draining requirement gone, there's no reason to keep the queue-wide
sequential approach.
This patch reimplements FLUSH/FUA support so that each FLUSH/FUA
request is sequenced individually. The actual FLUSH execution is
double buffered: whenever a request needs to execute a PRE- or
POSTFLUSH, it is queued on the pending queue. Once certain conditions
are met, a flush request is issued, and on its completion all pending
requests proceed to the next sequence step.
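As a rough standalone model of the double buffering described above (illustrative C with invented names; not the kernel implementation):

```c
/* Two waiter pools alternate roles: new waiters join the "pending"
 * side; the flush currently on the wire serves the "running" side. */
struct flush_state {
	int nr_queued[2];          /* waiters parked on each side */
	unsigned int pending_idx;  /* side that new waiters join */
	unsigned int running_idx;  /* side served by the in-flight flush */
};

/* A request that needs a PRE/POSTFLUSH parks itself on the pending side. */
static void queue_for_flush(struct flush_state *fs)
{
	fs->nr_queued[fs->pending_idx]++;
}

/* Issue one flush on behalf of everyone currently pending. */
static void issue_flush(struct flush_state *fs)
{
	fs->pending_idx ^= 1;   /* flip: later arrivals gather on the other side */
	/* ... submit the actual flush to the device here ... */
}

/* On completion, every waiter on the running side advances one step. */
static void flush_complete(struct flush_state *fs)
{
	fs->nr_queued[fs->running_idx] = 0;   /* all of them proceed */
	fs->running_idx ^= 1;                 /* catch up with the pending side */
}
```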
This allows arbitrary merging of different types of flushes. How they
are merged can be controlled and tuned primarily by adjusting the
aforementioned 'conditions' that determine when to issue the next
flush.
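These 'conditions' are the tuning point. A plausible shape for the policy (a sketch with assumed names and an illustrative timeout, not the exact kernel check) is: issue the next flush only when none is in flight, and hold off while ordinary data writes are still in flight unless waiters have already been pending for a while, so more flushes get a chance to merge.

```c
#include <stdbool.h>

#define FLUSH_PENDING_TIMEOUT_MS 5   /* illustrative value, not from the patch */

/* Decide whether the pending waiters justify issuing a flush now. */
static bool should_issue_flush(bool flush_in_flight, int nr_pending,
			       int data_in_flight, long pending_for_ms)
{
	if (flush_in_flight || nr_pending == 0)
		return false;   /* one already out, or nothing to flush for */
	if (data_in_flight && pending_for_ms < FLUSH_PENDING_TIMEOUT_MS)
		return false;   /* wait: later flushes may still merge in */
	return true;
}
```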
This is inspired by Darrick's patches to merge multiple zero-data
flushes, which helps workloads with highly concurrent fsync requests.
* As flush requests are never put on the IO scheduler, the request
fields used for flush sequencing share space with rq->rb_node, and
rq->completion_data is moved out of the union. This increases the
request size by one pointer (see the layout sketch after this list).
As rq->elevator_private* are likewise used only by the iosched, the
request size could be reduced further. Doing so, however, requires
modifying the request allocation path so that iosched data is not
allocated for flush requests.
* FLUSH/FUA processing now happens on insertion instead of dispatch (a sketch of this insertion-side flow follows the diff below).
- Comments updated as per Vivek and Mike.
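For illustration, the field sharing described in the first bullet gives struct request roughly this shape (a simplified sketch with stand-in types, not the verbatim header):

```c
/* Minimal stand-ins for the kernel types, so the sketch is self-contained. */
struct rb_node { struct rb_node *left, *right, *parent; };
struct list_head { struct list_head *next, *prev; };

struct request_sketch {
	/* Flush requests never sit in an io scheduler rb-tree, so the
	 * flush sequencing state can share storage with rb_node ... */
	union {
		struct rb_node rb_node;          /* iosched sort/lookup */
		struct {
			unsigned int seq;        /* position in the flush sequence */
			struct list_head list;   /* pending-queue linkage */
		} flush;
	};
	/* ... while completion_data leaves the union, costing one pointer. */
	void *completion_data;
};
```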
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "Darrick J. Wong" <djwong@us.ibm.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk.h')
-rw-r--r-- block/blk.h | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
```diff
diff --git a/block/blk.h b/block/blk.h
index 9d2ee8f4d9af..284b500852bd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -51,21 +51,17 @@ static inline void blk_clear_rq_complete(struct request *rq)
  */
 #define ELV_ON_HASH(rq)	(!hlist_unhashed(&(rq)->hash))
 
-struct request *blk_do_flush(struct request_queue *q, struct request *rq);
+void blk_insert_flush(struct request *rq);
+void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
 	struct request *rq;
 
 	while (1) {
-		while (!list_empty(&q->queue_head)) {
+		if (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
-			    (rq->cmd_flags & REQ_FLUSH_SEQ))
-				return rq;
-			rq = blk_do_flush(q, rq);
-			if (rq)
-				return rq;
+			return rq;
 		}
 
 		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
```
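For context, the insertion-side flow that the new blk_insert_flush() declaration serves looks roughly like this (a self-contained sketch with stand-in types and flags; the real routing lives in the elevator code):

```c
/* Stand-in flags and types so the sketch compiles on its own. */
#define REQ_FLUSH (1u << 0)
#define REQ_FUA   (1u << 1)

struct request { unsigned int cmd_flags; };

/* Hypothetical stand-in for the blk_insert_flush() declared in blk.h. */
static void blk_insert_flush(struct request *rq) { (void)rq; }

/* With processing moved to insertion time, the insert path diverts
 * FLUSH/FUA requests into the flush machinery up front, which is why
 * __elv_next_request() above no longer special-cases them at dispatch. */
static void insert_request(struct request *rq)
{
	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
		blk_insert_flush(rq);
		return;
	}
	/* ... normal elevator insertion paths ... */
}
```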