author     Tejun Heo <tj@kernel.org>          2009-07-03 06:56:18 -0400
committer  Jens Axboe <axboe@carl.(none)>     2009-07-03 15:06:45 -0400
commit     ab0fd1debe730ec9998678a0c53caefbd121ed10 (patch)
tree       c44de6ef7d876a32c4f733fdc26d40cdcbf7d374
parent     b59e64d0ddb756af57ea032383bfd393a286a8e8 (diff)
block: don't merge requests of different failfast settings

Block layer used to merge requests and bios with different failfast
settings.  This caused regular IOs to fail prematurely when they were
merged into failfast requests for readahead.

Niel Lambrechts could trigger the problem semi-reliably on ext4 when
resuming from STR.  ext4 uses readahead when reading inodes and,
combined with the deterministic extra SATA PHY exception cycle during
resume on the specific configuration, non-readahead inode read would
fail causing ext4 errors.  Please read the following thread for details.

  http://lkml.org/lkml/2009/5/23/21

This patch makes block layer reject merging if the failfast settings
don't match.  This is correct but likely to lower IO performance by
preventing regular IOs from mingling into surrounding readahead
requests.  Changes to allow such mixed merges and handle errors
correctly will be added later.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Niel Lambrechts <niel.lambrechts@gmail.com>
Cc: Theodore Tso <tytso@mit.edu>
Signed-off-by: Jens Axboe <axboe@carl.(none)>
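
To make the new rule concrete, here is a small stand-alone C model of the
check (not kernel code: the struct, the FF_* flag names and the
failfast_mergeable() helper are invented for illustration).  It mirrors the
per-flag comparisons added to attempt_merge() and elv_rq_merge_ok(): two
requests may only merge when all three failfast settings agree.

	/*
	 * Stand-alone model of the merge restriction added by this patch.
	 * All names below are hypothetical; only the rule is the same:
	 * refuse to merge when any of the three failfast flags differ.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define FF_DEV        (1u << 0)   /* fail fast on device errors    */
	#define FF_TRANSPORT  (1u << 1)   /* fail fast on transport errors */
	#define FF_DRIVER     (1u << 2)   /* fail fast on driver errors    */

	struct model_rq {
		unsigned int flags;
	};

	/* Same shape as the new checks: reject if any failfast flag differs. */
	static bool failfast_mergeable(const struct model_rq *a,
				       const struct model_rq *b)
	{
		return (a->flags & FF_DEV) == (b->flags & FF_DEV) &&
		       (a->flags & FF_TRANSPORT) == (b->flags & FF_TRANSPORT) &&
		       (a->flags & FF_DRIVER) == (b->flags & FF_DRIVER);
	}

	int main(void)
	{
		struct model_rq regular   = { .flags = 0 };
		struct model_rq readahead = { .flags = FF_DEV | FF_TRANSPORT |
						       FF_DRIVER };

		printf("regular + readahead mergeable?   %s\n",
		       failfast_mergeable(&regular, &readahead) ? "yes" : "no");
		printf("readahead + readahead mergeable? %s\n",
		       failfast_mergeable(&readahead, &readahead) ? "yes" : "no");
		return 0;
	}

Under this model a plain read and a readahead (failfast) request no longer
merge, while two readahead requests still do, which is the behaviour change
the patch describes.
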
-rw-r--r--  block/blk-merge.c   6
-rw-r--r--  block/elevator.c    8
2 files changed, 14 insertions, 0 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 39ce64432ba6..e1999679a4d5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -350,6 +350,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (blk_integrity_rq(req) != blk_integrity_rq(next))
 		return 0;
 
+	/* don't merge requests of different failfast settings */
+	if (blk_failfast_dev(req) != blk_failfast_dev(next) ||
+	    blk_failfast_transport(req) != blk_failfast_transport(next) ||
+	    blk_failfast_driver(req) != blk_failfast_driver(next))
+		return 0;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
diff --git a/block/elevator.c b/block/elevator.c
index ca861927ba41..6f2375339a99 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -100,6 +100,14 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (bio_integrity(bio) != blk_integrity_rq(rq))
 		return 0;
 
+	/*
+	 * Don't merge if failfast settings don't match
+	 */
+	if (bio_failfast_dev(bio) != blk_failfast_dev(rq) ||
+	    bio_failfast_transport(bio) != blk_failfast_transport(rq) ||
+	    bio_failfast_driver(bio) != blk_failfast_driver(rq))
+		return 0;
+
 	if (!elv_iosched_allow_merge(rq, bio))
 		return 0;
 