about summary refs log tree commit diff stats
path: root/block
diff options
context:
space:
mode:
authorDarrick J. Wong <darrick.wong@oracle.com>2013-02-21 19:42:55 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-21 20:22:20 -0500
commitffecfd1a72fccfcee3dabb99b9ecba9735318f90 (patch)
treea5c3caf67249ec811a2c2c95678d9349fd8e2412 /block
parent13575ca14fcdacd1ad914d00bc63eb4d96280986 (diff)
block: optionally snapshot page contents to provide stable pages during write
This provides a band-aid to provide stable page writes on jbd without needing to backport the fixed locking and page writeback bit handling schemes of jbd2. The band-aid works by using bounce buffers to snapshot page contents instead of waiting.

For those wondering about the ext3 bandage -- fixing the jbd locking (which was done as part of ext4dev years ago) is a lot of surgery, and setting PG_writeback on data pages when we actually hold the page lock dropped ext3 performance by nearly an order of magnitude. If we're going to migrate iscsi and raid to use stable page writes, the complaints about high latency will likely return. We might as well centralize their page snapshotting thing to one place.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Artem Bityutskiy <dedekind1@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Eric Van Hensbergen <ericvh@gmail.com>
Cc: Ron Minnich <rminnich@sandia.gov>
Cc: Latchesar Ionkov <lucho@ionkov.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c973249d68cd..277134cb5d32 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1474,6 +1474,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
1474 */ 1474 */
1475 blk_queue_bounce(q, &bio); 1475 blk_queue_bounce(q, &bio);
1476 1476
1477 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1478 bio_endio(bio, -EIO);
1479 return;
1480 }
1481
1477 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { 1482 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1478 spin_lock_irq(q->queue_lock); 1483 spin_lock_irq(q->queue_lock);
1479 where = ELEVATOR_INSERT_FLUSH; 1484 where = ELEVATOR_INSERT_FLUSH;
@@ -1714,9 +1719,6 @@ generic_make_request_checks(struct bio *bio)
1714 */ 1719 */
1715 blk_partition_remap(bio); 1720 blk_partition_remap(bio);
1716 1721
1717 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1718 goto end_io;
1719
1720 if (bio_check_eod(bio, nr_sectors)) 1722 if (bio_check_eod(bio, nr_sectors))
1721 goto end_io; 1723 goto end_io;
1722 1724