author     Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-05 00:35:41 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2006-12-06 10:46:39 -0500
commit     e261f51f25b98c213e0b3d7f2109b117d714f69d
tree       92dfd162f8721f99de74c8227a7f0655143eed9a /fs/nfs
parent     4d770ccf4257b23a7ca2a85de1b1c22657b581d8
NFS: Make nfs_updatepage() mark the page as dirty.
This will ensure that we can call set_page_writeback() from within
nfs_writepage(), which is always called with the page lock set.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
 -rw-r--r--  fs/nfs/write.c | 73
 1 file changed, 56 insertions, 17 deletions
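The core of the patch is the new helper nfs_page_mark_flush(), which appears in the first large hunk below. Its locking pattern is worth seeing in isolation: look up the request under a spinlock, try to claim it without sleeping, and on contention drop the spinlock, wait for the holder, then retry. The following is a minimal userspace sketch of that pattern, not part of the patch; pthread primitives stand in for the kernel's req_lock spinlock, the nfs_lock_request_dontget() trylock and the nfs_wait_on_request() wait queue, and the helper name mark_flush is hypothetical.

    /* Userspace model of the retry loop in nfs_page_mark_flush().
     * All names here are stand-ins, not kernel API. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER; /* models NFS_I()->req_lock */
    static pthread_cond_t req_done = PTHREAD_COND_INITIALIZER;   /* models nfs_wait_on_request() */

    struct request {
            bool claimed;  /* models nfs_lock_request_dontget()/nfs_unlock_request() */
            bool flushing; /* models the PG_FLUSHING bit in wb_flags */
    };

    /* Claim the request, tag it for flushing, release it.
     * Returns 0 if we tagged it ourselves, 1 if it was already tagged. */
    static int mark_flush(struct request *req)
    {
            int was_tagged;

            pthread_mutex_lock(&req_lock);
            while (req->claimed) {
                    /* Contended: pthread_cond_wait() drops req_lock while
                     * sleeping and retakes it before returning, mirroring
                     * the kernel's spin_unlock()/nfs_wait_on_request()/
                     * spin_lock() sequence. */
                    pthread_cond_wait(&req_done, &req_lock);
            }
            req->claimed = true;          /* trylock succeeded */
            pthread_mutex_unlock(&req_lock);

            was_tagged = req->flushing;   /* models test_and_set_bit(PG_FLUSHING) */
            req->flushing = true;

            pthread_mutex_lock(&req_lock);
            req->claimed = false;         /* models nfs_unlock_request() */
            pthread_cond_broadcast(&req_done);
            pthread_mutex_unlock(&req_lock);
            return was_tagged;
    }

    int main(void)
    {
            struct request req = { .claimed = false, .flushing = false };

            printf("first call tags the request: %d\n", mark_flush(&req));  /* prints 0 */
            printf("second call finds it tagged: %d\n", mark_flush(&req));  /* prints 1 */
            return 0;
    }

Compile with `cc -pthread`. The kernel version additionally returns 1 when no request exists for the page and a negative errno when the wait is interrupted by a signal, as the hunk below shows.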
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0eca6a542106..130528d09a26 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -77,6 +77,7 @@
 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
                                             struct page *,
                                             unsigned int, unsigned int);
+static void nfs_mark_request_dirty(struct nfs_page *req);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
@@ -245,29 +246,64 @@ static int wb_priority(struct writeback_control *wbc)
 }
 
 /*
+ * Find an associated nfs write request, and prepare to flush it out
+ * Returns 1 if there was no write request, or if the request was
+ * already tagged by nfs_set_page_dirty. Returns 0 if the request
+ * was not tagged.
+ * May also return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_mark_flush(struct page *page)
+{
+        struct nfs_page *req;
+        spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+        int ret;
+
+        spin_lock(req_lock);
+        for(;;) {
+                req = nfs_page_find_request_locked(page);
+                if (req == NULL) {
+                        spin_unlock(req_lock);
+                        return 1;
+                }
+                if (nfs_lock_request_dontget(req))
+                        break;
+                /* Note: If we hold the page lock, as is the case in nfs_writepage,
+                 *       then the call to nfs_lock_request_dontget() will always
+                 *       succeed provided that someone hasn't already marked the
+                 *       request as dirty (in which case we don't care).
+                 */
+                spin_unlock(req_lock);
+                ret = nfs_wait_on_request(req);
+                nfs_release_request(req);
+                if (ret != 0)
+                        return ret;
+                spin_lock(req_lock);
+        }
+        spin_unlock(req_lock);
+        if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0)
+                nfs_mark_request_dirty(req);
+        ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
+        nfs_unlock_request(req);
+        return ret;
+}
+
+/*
  * Write an mmapped page to the server.
  */
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
         struct nfs_open_context *ctx;
         struct inode *inode = page->mapping->host;
-        struct nfs_page *req;
         unsigned offset;
-        int err = 0;
+        int err;
 
         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
-        req = nfs_page_find_request(page);
-        if (req != NULL) {
-                int flushme = test_bit(PG_NEED_FLUSH, &req->wb_flags);
-                nfs_release_request(req);
-                if (!flushme)
-                        goto out;
-                /* Ensure we've flushed out the invalid write */
-                nfs_wb_page_priority(inode, page, wb_priority(wbc) | FLUSH_STABLE | FLUSH_NOWRITEPAGE);
-        }
-
+        err = nfs_page_mark_flush(page);
+        if (err <= 0)
+                goto out;
+        err = 0;
         offset = nfs_page_length(page);
         if (!offset)
                 goto out;
@@ -279,7 +315,11 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
         }
         err = nfs_writepage_setup(ctx, page, 0, offset);
         put_nfs_open_context(ctx);
-
+        if (err != 0)
+                goto out;
+        err = nfs_page_mark_flush(page);
+        if (err > 0)
+                err = 0;
 out:
         if (!wbc->for_writepages)
                 nfs_flush_mapping(page->mapping, wbc, wb_priority(wbc));
@@ -409,8 +449,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
 static inline int
 nfs_dirty_request(struct nfs_page *req)
 {
-        struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
-        return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
+        return test_bit(PG_FLUSHING, &req->wb_flags) == 0;
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -628,7 +667,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
                         return ERR_PTR(error);
                 }
                 spin_unlock(&nfsi->req_lock);
-                nfs_mark_request_dirty(new);
                 return new;
         }
         spin_unlock(&nfsi->req_lock);
@@ -684,7 +722,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         if (req == NULL)
                 return 0;
         do_flush = req->wb_page != page || req->wb_context != ctx
-                || test_bit(PG_NEED_FLUSH, &req->wb_flags);
+                || !nfs_dirty_request(req);
         nfs_release_request(req);
         if (!do_flush)
                 return 0;
@@ -723,6 +761,7 @@ int nfs_updatepage(struct file *file, struct page *page,
         }
 
         status = nfs_writepage_setup(ctx, page, offset, count);
+        __set_page_dirty_nobuffers(page);
 
         dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
                         status, (long long)i_size_read(inode));
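A closing note on the flag semantics. After this patch, nfs_dirty_request() simply reports that PG_FLUSHING has not yet been set, and nfs_flush_incompatible() flushes when that test fails. The tag itself is applied exactly once because test_and_set_bit() is an atomic read-modify-write: only the caller that flips the bit from 0 to 1 goes on to call nfs_mark_request_dirty(). A minimal C11-atomics model of that idempotent transition follows; it is a sketch, and the test_and_set_bit() here is a stand-in for the kernel's, not the real implementation.

    /* Model of the "tag exactly once" step in nfs_page_mark_flush(). */
    #include <stdatomic.h>
    #include <stdio.h>

    #define PG_FLUSHING 0          /* bit number, as in the patch */

    static atomic_ulong wb_flags;  /* models req->wb_flags */

    /* Stand-in for the kernel's test_and_set_bit(): atomically set bit
     * nr and return its previous value, so exactly one caller sees 0. */
    static int test_and_set_bit(int nr, atomic_ulong *addr)
    {
            unsigned long mask = 1UL << nr;
            return (atomic_fetch_or(addr, mask) & mask) != 0;
    }

    int main(void)
    {
            if (test_and_set_bit(PG_FLUSHING, &wb_flags) == 0)
                    puts("first caller tags the request and re-queues it");
            if (test_and_set_bit(PG_FLUSHING, &wb_flags) == 0)
                    puts("never reached: the request was already tagged");
            return 0;
    }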