author     Shaohua Li <shaohua.li@intel.com>        2011-07-08 02:19:20 -0400
committer  Jens Axboe <jaxboe@fusionio.com>         2011-07-08 02:19:20 -0400
commit     55c022bbddb2c056b5dff1bd1b1758d31b6d64c9 (patch)
tree       fdd3aa29a1407bbd19b8efe47b2538544da85a70 /block/blk-core.c
parent     719c0c590609809365c6f3da2f923cd84dc99113 (diff)
block: avoid building too big plug list
When I test fio scripts with a big I/O depth, I found that total throughput
drops compared to a relatively small I/O depth. The reason is that the thread
accumulates big requests in its plug list, which causes some delays (surely
this depends on CPU speed).

I thought we'd better have a threshold for requests. When the threshold is
reached, it means there is no request merging and queue lock contention isn't
severe when pushing per-task requests to the queue, so the main advantages of
block plugging don't exist. We can force a plug list flush in this case.

With this, my test throughput actually increases and is almost equal to that
of the small I/O depth. Another side effect is that irq-off time decreases in
blk_flush_plug_list() for big I/O depths.

The BLK_MAX_REQUEST_COUNT of 16 is chosen arbitrarily, but it is efficient at
reducing lock contention for me. I'm open here, though; 32 is OK in my test
too.
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
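
For context, here is a minimal sketch of how a submitter drives the plugging
machinery this patch bounds. It is illustrative only, not part of the patch:
submit_batch, bios, and nr are hypothetical names, and it assumes the
2011-era submit_bio(int rw, struct bio *) signature. Before this change,
everything queued under the plug sat in plug->list until blk_finish_plug();
after it, the list is flushed early once plug->count reaches
BLK_MAX_REQUEST_COUNT.

```c
#include <linux/blkdev.h>
#include <linux/bio.h>

/* Illustrative sketch: batch a set of bios under a per-task plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* arm the per-task plug */
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);	/* each plugged request bumps plug->count;
						 * hitting the threshold forces a flush */
	blk_finish_plug(&plug);			/* flush whatever remains */
}
```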
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d2f8f4049abd..a56485292062 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1302,7 +1302,10 @@ get_rq:
 				plug->should_sort = 1;
 		}
 		list_add_tail(&req->queuelist, &plug->list);
+		plug->count++;
 		drive_stat_acct(req, 1);
+		if (plug->count >= BLK_MAX_REQUEST_COUNT)
+			blk_flush_plug_list(plug, false);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
@@ -2626,6 +2629,7 @@ void blk_start_plug(struct blk_plug *plug)
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
+	plug->count = 0;
 
 	/*
 	 * If this is a nested plug, don't actually assign it. It will be
@@ -2709,6 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 
 	list_splice_init(&plug->list, &list);
+	plug->count = 0;
 
 	if (plug->should_sort) {
 		list_sort(NULL, &list, plug_rq_cmp);
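
The diffstat above is limited to block/blk-core.c, so the companion
declaration is not shown. Below is a sketch of struct blk_plug as this patch
leaves it, reconstructed only from the fields the hunks touch; the
authoritative layout lives in include/linux/blkdev.h and may contain
additional members.

```c
/* Sketch of the per-task plug state, inferred from the fields used above. */
struct blk_plug {
	struct list_head list;		/* pending requests, drained on flush */
	struct list_head cb_list;	/* unplug callbacks */
	unsigned int should_sort;	/* set when plugged requests span queues */
	unsigned int count;		/* new: number of requests currently plugged */
};

#define BLK_MAX_REQUEST_COUNT	16	/* flush threshold chosen in this patch */
```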