author		Steven Whitehouse <swhiteho@redhat.com>		2009-04-20 03:16:26 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>		2009-05-11 07:36:43 -0400
commit		4a0f9a321a113392b448e477018311d14fba2b34 (patch)
tree		7092fa29a94ef5aaada6d56a8d761615280c5f71 /fs/gfs2
parent		c969f58ca43fc403c75f5d3da4cf1e21de7afaa0 (diff)
GFS2: Optimise writepage for metadata
This adds a GFS2-specific writepage for metadata, rather than continuing to use the generic VFS function. As a result we now tag all our metadata I/O with the correct flag, so blktraces will be less confusing. Also, the generic function was checking for a number of corner cases which cannot happen on the metadata address spaces, so this should be faster too.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
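For reference, the core of the change is that every buffer head written back through this path is now tagged as metadata I/O, instead of being submitted as a plain WRITE via block_write_full_page(). A minimal sketch of the tagging, condensed from the hunk below (BIO_RW_META and WRITE_SYNC_PLUG are the flag names used by this kernel tree; the comments are editorial):

	/* Build the request flags once per page: mark the I/O as metadata and
	 * use a synchronous (plugged) write when the caller asked for
	 * data-integrity writeback (WB_SYNC_ALL). */
	int write_op = (1 << BIO_RW_META) |
		       (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);

	/* Every dirty, mapped buffer on the page is then submitted with it. */
	submit_bh(write_op, bh);	/* blktrace reports these as metadata requests */

The corner cases handled by block_write_full_page() (chiefly pages that straddle or lie beyond i_size) cannot arise on the metadata address space, which is where the expected speed-up comes from.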
Diffstat (limited to 'fs/gfs2')
-rw-r--r--	fs/gfs2/meta_io.c	66
1 file changed, 57 insertions, 9 deletions
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 75b2aec06f85..78a5f4312667 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -33,17 +33,65 @@
 #include "util.h"
 #include "ops_address.h"
 
-static int aspace_get_block(struct inode *inode, sector_t lblock,
-			    struct buffer_head *bh_result, int create)
+static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {
-	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
-	return -EOPNOTSUPP;
-}
+	int err;
+	struct buffer_head *bh, *head;
+	int nr_underway = 0;
+	int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE));
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(!page_has_buffers(page));
+
+	head = page_buffers(page);
+	bh = head;
+
+	do {
+		if (!buffer_mapped(bh))
+			continue;
+		/*
+		 * If it's a fully non-blocking write attempt and we cannot
+		 * lock the buffer then redirty the page. Note that this can
+		 * potentially cause a busy-wait loop from pdflush and kswapd
+		 * activity, but those code paths have their own higher-level
+		 * throttling.
+		 */
+		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+			lock_buffer(bh);
+		} else if (!trylock_buffer(bh)) {
+			redirty_page_for_writepage(wbc, page);
+			continue;
+		}
+		if (test_clear_buffer_dirty(bh)) {
+			mark_buffer_async_write(bh);
+		} else {
+			unlock_buffer(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+
+	/*
+	 * The page and its buffers are protected by PageWriteback(), so we can
+	 * drop the bh refcounts early.
+	 */
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			submit_bh(write_op, bh);
+			nr_underway++;
+		}
+		bh = next;
+	} while (bh != head);
+	unlock_page(page);
 
-static int gfs2_aspace_writepage(struct page *page,
-				 struct writeback_control *wbc)
-{
-	return block_write_full_page(page, aspace_get_block, wbc);
+	err = 0;
+	if (nr_underway == 0)
+		end_page_writeback(page);
+
+	return err;
 }
 
 static const struct address_space_operations aspace_aops = {