aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorshaohua.li@intel.com <shaohua.li@intel.com>2011-05-06 13:34:41 -0400
committerJens Axboe <jaxboe@fusionio.com>2011-05-06 13:36:25 -0400
commit3ac0cc4508709d42ec9aa351086c7d38bfc0660c (patch)
tree933d5ab6ff9b0763f36e614962abb5bdcc4a348a /block
parentf3876930952390a31c3a7fd68dd621464a36eb80 (diff)
block: hold queue if flush is running for non-queueable flush drive
In some drives, flush requests are non-queueable. When a flush request is running, normal read/write requests can't run. If the block layer dispatches such a request, the driver can't handle it and requeues it. Tejun suggested we can hold the queue when a flush is running. This avoids unnecessary requeues and can also improve performance. For example, we have requests flush1, write1, flush2. flush1 is dispatched, then the queue is held, and write1 isn't inserted into the queue. After flush1 is finished, flush2 will be dispatched. Since the disk cache is already clean, flush2 will be finished very soon, so it looks like flush2 is folded into flush1. In my test, the queue holding completely solves a regression introduced by commit 53d63e6b0dfb95882ec0219ba6bbd50cde423794: block: make the flush insertion use the tail of the dispatch list It's not a preempt type request, in fact we have to insert it behind requests that do specify INSERT_FRONT. which causes about a 20% regression running a sysbench fileio workload. Stable: 2.6.39 only Cc: stable@kernel.org Signed-off-by: Shaohua Li <shaohua.li@intel.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-flush.c16
-rw-r--r--block/blk.h21
2 files changed, 31 insertions, 6 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6c9b5e189e62..bb21e4c36f70 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,13 +212,19 @@ static void flush_end_io(struct request *flush_rq, int error)
212 } 212 }
213 213
214 /* 214 /*
215 * Moving a request silently to empty queue_head may stall the 215 * Kick the queue to avoid stall for two cases:
216 * queue. Kick the queue in those cases. This function is called 216 * 1. Moving a request silently to empty queue_head may stall the
217 * from request completion path and calling directly into 217 * queue.
218 * request_fn may confuse the driver. Always use kblockd. 218 * 2. When flush request is running in non-queueable queue, the
219 * queue is held. Restart the queue after the flush request is finished
220 * to avoid stall.
221 * This function is called from request completion path and calling
222 * directly into request_fn may confuse the driver. Always use
223 * kblockd.
219 */ 224 */
220 if (queued) 225 if (queued || q->flush_queue_delayed)
221 blk_run_queue_async(q); 226 blk_run_queue_async(q);
227 q->flush_queue_delayed = 0;
222} 228}
223 229
224/** 230/**
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..83e4bff36201 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -62,7 +62,26 @@ static inline struct request *__elv_next_request(struct request_queue *q)
62 rq = list_entry_rq(q->queue_head.next); 62 rq = list_entry_rq(q->queue_head.next);
63 return rq; 63 return rq;
64 } 64 }
65 65 /*
66 * Flush request is running and flush request isn't queueable
67 * in the drive, we can hold the queue till flush request is
68 * finished. Even if we don't do this, the driver can't dispatch next
69 * requests and will requeue them. And this can improve
70 * throughput too. For example, we have request flush1, write1,
71 * flush 2. flush1 is dispatched, then the queue is held, write1
72 * isn't inserted to queue. After flush1 is finished, flush2
73 * will be dispatched. Since disk cache is already clean,
74 * flush2 will be finished very soon, so looks like flush2 is
75 * folded to flush1.
76 * Since the queue is held, a flag is set to indicate the queue
77 * should be restarted later. Please see flush_end_io() for
78 * details.
79 */
80 if (q->flush_pending_idx != q->flush_running_idx &&
81 !queue_flush_queueable(q)) {
82 q->flush_queue_delayed = 1;
83 return NULL;
84 }
66 if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) 85 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
67 return NULL; 86 return NULL;
68 } 87 }