aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/extent_io.h
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-11-06 22:02:51 -0500
committerChris Mason <chris.mason@oracle.com>2008-11-06 22:02:51 -0500
commit771ed689d2cd53439e28e095bc38fbe40a71429e (patch)
tree518801f7141928e398d40c2b5955720d4346ce1a /fs/btrfs/extent_io.h
parent4a69a41009c4ac691f7d9c289f5f37fabeddce46 (diff)
Btrfs: Optimize compressed writeback and reads
When reading compressed extents, try to put pages into the page cache
for any pages covered by the compressed extent that readpages didn't
already preload.

Add an async work queue to handle transformations at delayed allocation
processing time. Right now this is just compression. The workflow is:

1) Find offsets in the file marked for delayed allocation
2) Lock the pages
3) Lock the state bits
4) Call the async delalloc code

The async delalloc code clears the state lock bits and delalloc bits.
It is important this happens before the range goes into the work queue
because otherwise it might deadlock with other work queue items that
try to lock those extent bits.

The file pages are compressed, and if the compression doesn't work the
pages are written back directly.

An ordered work queue is used to make sure the inodes are written in
the same order that pdflush or writepages sent them down.

This changes extent_write_cache_pages to let the writepage function
update the wbc nr_written count.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.h')
-rw-r--r--fs/btrfs/extent_io.h13
1 files changed, 10 insertions, 3 deletions
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 283110ec4ee0..2d5f67065b69 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -35,7 +35,8 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
 				       unsigned long bio_flags);
 struct extent_io_ops {
 	int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
-			     u64 start, u64 end, int *page_started);
+			     u64 start, u64 end, int *page_started,
+			     unsigned long *nr_written);
 	int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
 	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
 	extent_submit_bio_hook_t *submit_bio_hook;
@@ -172,6 +173,9 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
 			  get_extent_t *get_extent,
 			  struct writeback_control *wbc);
+int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
+			      u64 start, u64 end, get_extent_t *get_extent,
+			      int mode);
 int extent_writepages(struct extent_io_tree *tree,
 		      struct address_space *mapping,
 		      get_extent_t *get_extent,
@@ -256,6 +260,9 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 int extent_clear_unlock_delalloc(struct inode *inode,
 				struct extent_io_tree *tree,
 				u64 start, u64 end, struct page *locked_page,
-				int clear_dirty, int set_writeback,
-				int clear_writeback);
+				int unlock_page,
+				int clear_unlock,
+				int clear_delalloc, int clear_dirty,
+				int set_writeback,
+				int end_writeback);
 #endif