author	Shaohua Li <shaohua.li@intel.com>	2011-08-24 10:04:32 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-08-24 10:04:32 -0400
commit	a63271627521b825b0dd0a564e9a9c62b4c1ca89 (patch)
tree	8cb0076737679544da5de89484548c921e2131cc
parent	e8037d49835482c9534a9a07bed0d0ea831135ae (diff)
block: change force plug flush call order
Do blk_flush_plug_list() first and then add new request at the tail. New request can't be merged to existing requests, but later new requests might be merged with this new one. If blk_flush_plug_list() is done later, the merge doesn't happen. Believe it or not, this fixes a 10% regression running sysbench workload.

Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r--	block/blk-core.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 90e1ffdeb415..67dba6941194 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1302,11 +1302,11 @@ get_rq:
 			if (__rq->q != q)
 				plug->should_sort = 1;
 		}
-		list_add_tail(&req->queuelist, &plug->list);
-		plug->count++;
-		drive_stat_acct(req, 1);
 		if (plug->count >= BLK_MAX_REQUEST_COUNT)
 			blk_flush_plug_list(plug, false);
+		plug->count++;
+		list_add_tail(&req->queuelist, &plug->list);
+		drive_stat_acct(req, 1);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
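
The reasoning in the commit message can be illustrated with a minimal sketch, not taken from the patch: a simplified model of the plugged-request path, where the plug list, helpers, and types below are stand-ins for the real block-layer structures, not the kernel API.

```c
/*
 * Hedged sketch (assumed, simplified model): shows why the forced
 * flush runs before the new request is queued.  If the flush ran
 * after list_add_tail(), the freshly added request would be flushed
 * immediately and later bios would lose the chance to merge with it.
 */
#include <stddef.h>

#define BLK_MAX_REQUEST_COUNT 16

struct request { struct request *next; };

struct blk_plug {
	struct request *list;   /* simplified singly linked plug list */
	unsigned int count;     /* number of plugged requests */
};

/* stand-in for blk_flush_plug_list(): dispatch and empty the plug */
static void flush_plug(struct blk_plug *plug)
{
	plug->list = NULL;
	plug->count = 0;
}

/* stand-in for list_add_tail() on the plug list */
static void plug_add(struct blk_plug *plug, struct request *req)
{
	req->next = plug->list;
	plug->list = req;
}

/*
 * Order after the patch: flush first, then account and queue the new
 * request, so it stays on the (now short) plug list where subsequent
 * bios can still merge with it before the next flush.
 */
static void queue_plugged_request(struct blk_plug *plug, struct request *req)
{
	if (plug->count >= BLK_MAX_REQUEST_COUNT)
		flush_plug(plug);
	plug->count++;
	plug_add(plug, req);
}
```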