diff options
author | Shaohua Li <shaohua.li@intel.com> | 2011-08-24 10:04:34 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-08-24 10:04:34 -0400 |
commit | 56ebdaf2fa3c5276be201c5d1aff1490b682ecf2 (patch) | |
tree | f99669db0cd846baac7bb468e2cc14324e8950a3 /block | |
parent | a63271627521b825b0dd0a564e9a9c62b4c1ca89 (diff) |
block: simplify force plug flush code a little bit
Clean up the code a little bit. attempt_plug_merge() traverses the plug
list anyway, so we can do the request counting there; this reduces the stack
size a little bit.
The motivation here is that I suspect we should count the requests for each
queue (a task could handle multiple disks in the meantime), but my test doesn't
show it's worth doing. If somebody proves we should do it, the change below
will make that easier.
Signed-off-by: Shaohua Li <shli@kernel.org>
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 13 |
1 files changed, 7 insertions, 6 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index 67dba694119..b2ed78afd9f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -1167,7 +1167,7 @@ static bool bio_attempt_front_merge(struct request_queue *q, | |||
1167 | * true if merge was successful, otherwise false. | 1167 | * true if merge was successful, otherwise false. |
1168 | */ | 1168 | */ |
1169 | static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, | 1169 | static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, |
1170 | struct bio *bio) | 1170 | struct bio *bio, unsigned int *request_count) |
1171 | { | 1171 | { |
1172 | struct blk_plug *plug; | 1172 | struct blk_plug *plug; |
1173 | struct request *rq; | 1173 | struct request *rq; |
@@ -1176,10 +1176,13 @@ static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, | |||
1176 | plug = tsk->plug; | 1176 | plug = tsk->plug; |
1177 | if (!plug) | 1177 | if (!plug) |
1178 | goto out; | 1178 | goto out; |
1179 | *request_count = 0; | ||
1179 | 1180 | ||
1180 | list_for_each_entry_reverse(rq, &plug->list, queuelist) { | 1181 | list_for_each_entry_reverse(rq, &plug->list, queuelist) { |
1181 | int el_ret; | 1182 | int el_ret; |
1182 | 1183 | ||
1184 | (*request_count)++; | ||
1185 | |||
1183 | if (rq->q != q) | 1186 | if (rq->q != q) |
1184 | continue; | 1187 | continue; |
1185 | 1188 | ||
@@ -1219,6 +1222,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1219 | struct blk_plug *plug; | 1222 | struct blk_plug *plug; |
1220 | int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; | 1223 | int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT; |
1221 | struct request *req; | 1224 | struct request *req; |
1225 | unsigned int request_count = 0; | ||
1222 | 1226 | ||
1223 | /* | 1227 | /* |
1224 | * low level driver can indicate that it wants pages above a | 1228 | * low level driver can indicate that it wants pages above a |
@@ -1237,7 +1241,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) | |||
1237 | * Check if we can merge with the plugged list before grabbing | 1241 | * Check if we can merge with the plugged list before grabbing |
1238 | * any locks. | 1242 | * any locks. |
1239 | */ | 1243 | */ |
1240 | if (attempt_plug_merge(current, q, bio)) | 1244 | if (attempt_plug_merge(current, q, bio, &request_count)) |
1241 | goto out; | 1245 | goto out; |
1242 | 1246 | ||
1243 | spin_lock_irq(q->queue_lock); | 1247 | spin_lock_irq(q->queue_lock); |
@@ -1302,9 +1306,8 @@ get_rq: | |||
1302 | if (__rq->q != q) | 1306 | if (__rq->q != q) |
1303 | plug->should_sort = 1; | 1307 | plug->should_sort = 1; |
1304 | } | 1308 | } |
1305 | if (plug->count >= BLK_MAX_REQUEST_COUNT) | 1309 | if (request_count >= BLK_MAX_REQUEST_COUNT) |
1306 | blk_flush_plug_list(plug, false); | 1310 | blk_flush_plug_list(plug, false); |
1307 | plug->count++; | ||
1308 | list_add_tail(&req->queuelist, &plug->list); | 1311 | list_add_tail(&req->queuelist, &plug->list); |
1309 | drive_stat_acct(req, 1); | 1312 | drive_stat_acct(req, 1); |
1310 | } else { | 1313 | } else { |
@@ -2634,7 +2637,6 @@ void blk_start_plug(struct blk_plug *plug) | |||
2634 | INIT_LIST_HEAD(&plug->list); | 2637 | INIT_LIST_HEAD(&plug->list); |
2635 | INIT_LIST_HEAD(&plug->cb_list); | 2638 | INIT_LIST_HEAD(&plug->cb_list); |
2636 | plug->should_sort = 0; | 2639 | plug->should_sort = 0; |
2637 | plug->count = 0; | ||
2638 | 2640 | ||
2639 | /* | 2641 | /* |
2640 | * If this is a nested plug, don't actually assign it. It will be | 2642 | * If this is a nested plug, don't actually assign it. It will be |
@@ -2718,7 +2720,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) | |||
2718 | return; | 2720 | return; |
2719 | 2721 | ||
2720 | list_splice_init(&plug->list, &list); | 2722 | list_splice_init(&plug->list, &list); |
2721 | plug->count = 0; | ||
2722 | 2723 | ||
2723 | if (plug->should_sort) { | 2724 | if (plug->should_sort) { |
2724 | list_sort(NULL, &list, plug_rq_cmp); | 2725 | list_sort(NULL, &list, plug_rq_cmp); |