author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-05 00:35:40 -0500
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-12-06 10:46:38 -0500
commit		1a54533ec8d92a5edae97ec6ae10023ee71c4b46 (patch)
tree		a09db8af7974f13baa8d906149b77bf7db58bf1f /fs
parent		200baa2112012dd8a13db9da3ee6885403f9c013 (diff)
NFS: Add nfs_set_page_dirty()
We will want to allow nfs_writepage() to distinguish between pages that
have been marked as dirty by the VM, and those that have been marked as
dirty by nfs_updatepage().
In the former case, the entire page needs to be written out, so any requests
that were already pending need to be flushed out first.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/nfs/file.c	2
-rw-r--r--	fs/nfs/write.c	49
2 files changed, 37 insertions, 14 deletions
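
In outline, the new flow works like this (a condensed sketch only, assembled from the hunks below; the helper names nfs_page_find_request(), nfs_release_request() and the PG_NEED_FLUSH bit are the ones this patch uses, and the diff below remains the authoritative version):

/* Sketch of the dirty-page flow after this patch (not the literal code). */

/* New ->set_page_dirty() hook: the VM is dirtying the whole page. */
int nfs_set_page_dirty(struct page *page)
{
        struct nfs_page *req = nfs_page_find_request(page);

        if (req != NULL) {
                /* A partial-page request from nfs_updatepage() already
                 * exists: tag it so that nfs_writepage() flushes it
                 * before writing out the full page. */
                set_bit(PG_NEED_FLUSH, &req->wb_flags);
                nfs_release_request(req);
        }
        return __set_page_dirty_nobuffers(page);
}

/* nfs_writepage() then checks the tag: if PG_NEED_FLUSH is clear, the page
 * was dirtied by nfs_updatepage() and the pending request already describes
 * what needs writing, so nfs_writepage() leaves it alone; if it is set, the
 * stale request is flushed out first and the whole page is written. */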
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 143a19037ce8..c2fe3bd83ab1 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -331,7 +331,7 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
 const struct address_space_operations nfs_file_aops = {
         .readpage = nfs_readpage,
         .readpages = nfs_readpages,
-        .set_page_dirty = __set_page_dirty_nobuffers,
+        .set_page_dirty = nfs_set_page_dirty,
         .writepage = nfs_writepage,
         .writepages = nfs_writepages,
         .prepare_write = nfs_prepare_write,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f0720b544b12..266fea71cfc0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -251,16 +251,23 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
         struct nfs_open_context *ctx;
         struct inode *inode = page->mapping->host;
+        struct nfs_page *req;
         unsigned offset;
-        int err;
+        int err = 0;
 
         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-        /* Ensure we've flushed out any previous writes */
-        nfs_wb_page_priority(inode, page, wb_priority(wbc));
+        req = nfs_page_find_request(page);
+        if (req != NULL) {
+                int flushme = test_bit(PG_NEED_FLUSH, &req->wb_flags);
+                nfs_release_request(req);
+                if (!flushme)
+                        goto out;
+                /* Ensure we've flushed out the invalid write */
+                nfs_wb_page_priority(inode, page, wb_priority(wbc));
+        }
 
-        err = 0;
         offset = nfs_page_length(page);
         if (!offset)
                 goto out;
@@ -655,7 +662,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
         struct nfs_page *req;
-        int status = 0;
+        int do_flush, status;
         /*
          * Look for a request corresponding to this page. If there
          * is one, and it belongs to another file, we flush it out
@@ -664,15 +671,18 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
          * Also do the same if we find a request from an existing
          * dropped page.
          */
-        req = nfs_page_find_request(page);
-        if (req != NULL) {
-                int do_flush = req->wb_page != page || req->wb_context != ctx;
-
+        do {
+                req = nfs_page_find_request(page);
+                if (req == NULL)
+                        return 0;
+                do_flush = req->wb_page != page || req->wb_context != ctx
+                        || test_bit(PG_NEED_FLUSH, &req->wb_flags);
                 nfs_release_request(req);
-                if (do_flush)
-                        status = nfs_wb_page(page->mapping->host, page);
-        }
-        return (status < 0) ? status : 0;
+                if (!do_flush)
+                        return 0;
+                status = nfs_wb_page(page->mapping->host, page);
+        } while (status == 0);
+        return status;
 }
 
 /*
@@ -1437,6 +1447,19 @@ int nfs_wb_page(struct inode *inode, struct page* page)
         return nfs_wb_page_priority(inode, page, 0);
 }
 
+int nfs_set_page_dirty(struct page *page)
+{
+        struct nfs_page *req;
+
+        req = nfs_page_find_request(page);
+        if (req != NULL) {
+                /* Mark any existing write requests for flushing */
+                set_bit(PG_NEED_FLUSH, &req->wb_flags);
+                nfs_release_request(req);
+        }
+        return __set_page_dirty_nobuffers(page);
+}
+
 
 int __init nfs_init_writepagecache(void)
 {