author     Alex Tomas <alex@clusterfs.com>        2008-07-11 19:27:31 -0400
committer  Theodore Ts'o <tytso@mit.edu>          2008-07-11 19:27:31 -0400
commit     29a814d2ee0e43c2980f33f91c1311ec06c0aa35 (patch)
tree       dfc9d45fc3194237192b6cde1069faa70fe4c260 /fs
parent     87c89c232c8f7b3820c33c3b9bc803e9358027da (diff)
vfs: add hooks for ext4's delayed allocation support
Export mpage_bio_submit() and __mpage_writepage() for the benefit of
ext4's delayed allocation support. Also change __block_write_full_page
so that it calls get_block() to allocate the physical block for dirty
buffers that have the BH_Delay flag set, just as it does in the
!BH_Mapped case.
Signed-off-by: Alex Tomas <alex@clusterfs.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs')
-rw-r--r--	fs/buffer.c	 7	+++++--
-rw-r--r--	fs/mpage.c	14	+++++---------
2 files changed, 10 insertions, 11 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index f4b033237a02..5fa1512cd9a2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1691,11 +1691,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		 */
 		clear_buffer_dirty(bh);
 		set_buffer_uptodate(bh);
-	} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+	} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+		   buffer_dirty(bh)) {
 		WARN_ON(bh->b_size != blocksize);
 		err = get_block(inode, block, bh, 1);
 		if (err)
 			goto recover;
+		clear_buffer_delay(bh);
 		if (buffer_new(bh)) {
 			/* blockdev mappings never come here */
 			clear_buffer_new(bh);
@@ -1774,7 +1776,8 @@ recover:
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+		if (buffer_mapped(bh) && buffer_dirty(bh) &&
+		    !buffer_delay(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
 		} else {
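With the __block_write_full_page() change above, a delayed-allocation
filesystem's ->writepage can stay a thin wrapper around the generic
helper. A minimal sketch, reusing the hypothetical fs_da_get_block()
from the earlier example:

static int fs_writepage(struct page *page, struct writeback_control *wbc)
{
	/*
	 * block_write_full_page() runs __block_write_full_page(), which
	 * after this patch calls fs_da_get_block() for dirty BH_Delay
	 * buffers and clears BH_Delay once a real block is mapped.
	 */
	return block_write_full_page(page, fs_da_get_block, wbc);
}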
diff --git a/fs/mpage.c b/fs/mpage.c
index 235e4d3873a8..dbcc7af76a15 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -82,7 +82,7 @@ static void mpage_end_io_write(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-static struct bio *mpage_bio_submit(int rw, struct bio *bio)
+struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
 	bio->bi_end_io = mpage_end_io_read;
 	if (rw == WRITE)
@@ -90,6 +90,7 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 	submit_bio(rw, bio);
 	return NULL;
 }
+EXPORT_SYMBOL(mpage_bio_submit);
 
 static struct bio *
 mpage_alloc(struct block_device *bdev,
@@ -435,15 +436,9 @@ EXPORT_SYMBOL(mpage_readpage);
  * written, so it can intelligently allocate a suitably-sized BIO. For now,
  * just allocate full-size (16-page) BIOs.
  */
-struct mpage_data {
-	struct bio *bio;
-	sector_t last_block_in_bio;
-	get_block_t *get_block;
-	unsigned use_writepage;
-};
 
-static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
-		void *data)
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+		      void *data)
 {
 	struct mpage_data *mpd = data;
 	struct bio *bio = mpd->bio;
@@ -651,6 +646,7 @@ out:
 	mpd->bio = bio;
 	return ret;
 }
+EXPORT_SYMBOL(__mpage_writepage);
 
 /**
  * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
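Finally, a sketch of a ->writepages implementation driving the two
helpers exported above, mirroring what mpage_writepages() itself does
internally. It assumes struct mpage_data and the new prototypes are
made visible to filesystems in a shared header (the corresponding
include/ change falls outside this fs-only diffstat), and again reuses
the hypothetical fs_da_get_block():

static int fs_da_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = fs_da_get_block,	/* hypothetical, see above */
		.use_writepage = 1,
	};
	int ret;

	/* Walk the dirty pages, building and chaining BIOs in mpd. */
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);	/* flush the last BIO */
	return ret;
}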