author     Tejun Heo <tj@kernel.org>      2012-02-08 03:19:38 -0500
committer  Jens Axboe <axboe@kernel.dk>   2012-02-08 03:19:38 -0500
commit     050c8ea80e3e90019d9e981c6a117ef614e882ed
tree       e1c53f17a93ba48b9aedb0c1560dfb022733845f
parent     4e8670e26135d8fbfd5e084fddc1a8ed9f8eb4cb
block: separate out blk_rq_merge_ok() and blk_try_merge() from elevator functions
blk_rq_merge_ok() is the elevator-neutral part of the merge eligibility test. blk_try_merge() determines the merge direction and expects the caller to have already tested elv_rq_merge_ok().

elv_rq_merge_ok() now wraps blk_rq_merge_ok() and then calls elv_iosched_allow_merge(). elv_try_merge() is removed and its two callers are updated to call elv_rq_merge_ok() explicitly, followed by blk_try_merge(). While at it, make the rq_merge_ok() functions return bool.

This is in preparation for the plug merge update and doesn't introduce any behavior change.

This is based on Jens' patch to skip elevator_allow_merge_fn() from plug merge.

Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference: <4F16F3CA.90904@kernel.dk>
Original-patch-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
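For orientation, a minimal userspace sketch of the resulting layering follows. This is an illustration, not kernel code: struct toy_rq/toy_bio and toy_sched_allows_merge() are invented stand-ins for struct request, struct bio and elv_iosched_allow_merge(); only the call structure mirrors the patch.

/* layering_sketch.c - toy model of the split: an elevator-neutral check
 * plus an elevator-aware wrapper that adds the io scheduler veto. */
#include <stdbool.h>
#include <stdio.h>

enum dir { READ_DIR, WRITE_DIR };

struct toy_rq  { enum dir dir; bool discard; };
struct toy_bio { enum dir dir; bool discard; };

/* elevator-neutral eligibility test, in the spirit of blk_rq_merge_ok() */
static bool toy_blk_rq_merge_ok(const struct toy_rq *rq, const struct toy_bio *bio)
{
	if (bio->discard != rq->discard)	/* don't mix fs and discard requests */
		return false;
	if (bio->dir != rq->dir)		/* different data direction */
		return false;
	return true;
}

/* io scheduler veto, standing in for elv_iosched_allow_merge() */
static bool toy_sched_allows_merge(const struct toy_rq *rq, const struct toy_bio *bio)
{
	(void)rq; (void)bio;
	return true;				/* a real scheduler may say no */
}

/* elevator-aware wrapper, in the spirit of elv_rq_merge_ok() after the patch */
static bool toy_elv_rq_merge_ok(const struct toy_rq *rq, const struct toy_bio *bio)
{
	if (!toy_blk_rq_merge_ok(rq, bio))
		return false;
	return toy_sched_allows_merge(rq, bio);
}

int main(void)
{
	struct toy_rq  rq  = { .dir = WRITE_DIR, .discard = false };
	struct toy_bio ok  = { .dir = WRITE_DIR, .discard = false };
	struct toy_bio bad = { .dir = READ_DIR,  .discard = false };

	printf("same direction  -> %d\n", toy_elv_rq_merge_ok(&rq, &ok));   /* 1 */
	printf("mixed direction -> %d\n", toy_elv_rq_merge_ok(&rq, &bad));  /* 0 */
	return 0;
}

The point of the split is that the blk_rq_merge_ok()-style checks can be run without consulting the io scheduler, while elv_rq_merge_ok() keeps the scheduler veto layered on top.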
Diffstat (limited to 'block')
 block/blk-core.c  |  4
 block/blk-merge.c | 37
 block/blk.h       |  2
 block/elevator.c  | 55
 4 files changed, 45 insertions(+), 53 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 532b3a21b383..fa697bf691eb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1282,10 +1282,10 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 
 		(*request_count)++;
 
-		if (rq->q != q)
+		if (rq->q != q || !elv_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = elv_try_merge(rq, bio);
+		el_ret = blk_try_merge(rq, bio);
 		if (el_ret == ELEVATOR_BACK_MERGE) {
 			ret = bio_attempt_back_merge(q, rq, bio);
 			if (ret)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index cfcc37cb222b..160035f54882 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -471,3 +471,40 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	return attempt_merge(q, rq, next);
 }
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq))
+		return false;
+
+	/* don't merge file system requests and discard requests */
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+		return false;
+
+	/* don't merge discard requests and secure discard requests */
+	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+		return false;
+
+	/* different data direction or already started, don't merge */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return false;
+
+	/* must be same device and not a special request */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return false;
+
+	/* only merge integrity protected bio into ditto rq */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
+		return false;
+
+	return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+		return ELEVATOR_BACK_MERGE;
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+		return ELEVATOR_FRONT_MERGE;
+	return ELEVATOR_NO_MERGE;
+}
diff --git a/block/blk.h b/block/blk.h
index 7efd772336de..9c12f80882b0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -137,6 +137,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 			  struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
 
diff --git a/block/elevator.c b/block/elevator.c
index 91e18f8af9be..f016855a46b0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -70,39 +70,9 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!rq_mergeable(rq))
-		return 0;
-
-	/*
-	 * Don't merge file system requests and discard requests
-	 */
-	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
-		return 0;
-
-	/*
-	 * Don't merge discard requests and secure discard requests
-	 */
-	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
-		return 0;
-
-	/*
-	 * different data direction or already started, don't merge
-	 */
-	if (bio_data_dir(bio) != rq_data_dir(rq))
-		return 0;
-
-	/*
-	 * must be same device and not a special request
-	 */
-	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
-		return 0;
-
-	/*
-	 * only merge integrity protected bio into ditto rq
-	 */
-	if (bio_integrity(bio) != blk_integrity_rq(rq))
+	if (!blk_rq_merge_ok(rq, bio))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
@@ -112,23 +82,6 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
-	int ret = ELEVATOR_NO_MERGE;
-
-	/*
-	 * we can merge and sequence is ok, check if it's possible
-	 */
-	if (elv_rq_merge_ok(__rq, bio)) {
-		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
-			ret = ELEVATOR_BACK_MERGE;
-		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
-			ret = ELEVATOR_FRONT_MERGE;
-	}
-
-	return ret;
-}
-
 static struct elevator_type *elevator_find(const char *name)
 {
 	struct elevator_type *e;
@@ -478,8 +431,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * First try one-hit cache.
 	 */
-	if (q->last_merge) {
-		ret = elv_try_merge(q->last_merge, bio);
+	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+		ret = blk_try_merge(q->last_merge, bio);
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;