author	Trond Myklebust <Trond.Myklebust@netapp.com>	2008-06-13 12:12:32 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2008-07-09 12:09:23 -0400
commit	e7d39069e387a12d4c57f4067d9f48c1d29ea900 (patch)
tree	5c22fa8d08bc0ed90af16b4c8c1044e6da15bbac /fs/nfs/write.c
parent	396cee977f79590673ad51b04f1853e58bc30e7b (diff)
NFS: Clean up nfs_update_request()
Simplify the loop in nfs_update_request() by moving the code that attempts to update an existing cached NFS write request into a separate function.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
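For orientation only (not part of the patch or the kernel tree): a minimal, stand-alone C sketch of the update-or-create flow that the new nfs_setup_write_request() helper in the diff below implements. Every type and helper in the sketch is a simplified stand-in rather than the real NFS API; only the shape of the control flow mirrors the patch.

/*
 * Illustration only -- simplified model of nfs_setup_write_request():
 * first try to extend an existing cached write request, otherwise
 * create a new one and insert it, releasing it again if insertion fails.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_req { int unused; };

/* Stand-in for nfs_try_to_update_request(); NULL means "no usable cached request". */
static struct fake_req *try_to_update_request(void) { return NULL; }
/* Stand-in for nfs_create_request(). */
static struct fake_req *create_request(void) { static struct fake_req r; return &r; }
/* Stand-in for nfs_inode_add_request(); returns 0 on success. */
static int inode_add_request(struct fake_req *req) { (void)req; return 0; }
/* Stand-in for nfs_release_request(). */
static void release_request(struct fake_req *req) { (void)req; }

static struct fake_req *setup_write_request(void)
{
	struct fake_req *req = try_to_update_request();
	if (req != NULL)
		return req;              /* extended an existing cached write */
	req = create_request();          /* no usable request: make a new one */
	if (req == NULL)
		return NULL;
	if (inode_add_request(req) != 0) {
		release_request(req);    /* insertion failed: drop the new request */
		return NULL;
	}
	return req;
}

int main(void)
{
	printf("setup_write_request -> %p\n", (void *)setup_write_request());
	return 0;
}

The real kernel code reports failures with ERR_PTR-encoded errors rather than NULL; the sketch collapses that detail.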
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	201
1 file changed, 103 insertions, 98 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 21d8a48b624b..04f51e52e184 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -34,9 +34,6 @@
 /*
  * Local function declarations
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context*,
-                                        struct page *,
-                                        unsigned int, unsigned int);
 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                   struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
@@ -169,30 +166,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
                 SetPageUptodate(page);
 }
 
-static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
-                unsigned int offset, unsigned int count)
-{
-        struct nfs_page *req;
-        int ret;
-
-        for (;;) {
-                req = nfs_update_request(ctx, page, offset, count);
-                if (!IS_ERR(req))
-                        break;
-                ret = PTR_ERR(req);
-                if (ret != -EBUSY)
-                        return ret;
-                ret = nfs_wb_page(page->mapping->host, page);
-                if (ret != 0)
-                        return ret;
-        }
-        /* Update file length */
-        nfs_grow_file(page, offset, count);
-        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
-        nfs_clear_page_tag_locked(req);
-        return 0;
-}
-
 static int wb_priority(struct writeback_control *wbc)
 {
         if (wbc->for_reclaim)
@@ -356,11 +329,19 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 /*
  * Insert a write request into an inode
  */
-static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
         int error;
 
+        error = radix_tree_preload(GFP_NOFS);
+        if (error != 0)
+                goto out;
+
+        /* Lock the request! */
+        nfs_lock_request_dontget(req);
+
+        spin_lock(&inode->i_lock);
         error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
         BUG_ON(error);
         if (!nfsi->npages) {
@@ -374,6 +355,10 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
         kref_get(&req->wb_kref);
         radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                            NFS_PAGE_TAG_LOCKED);
+        spin_unlock(&inode->i_lock);
+        radix_tree_preload_end();
+out:
+        return error;
 }
 
 /*
@@ -565,101 +550,121 @@ static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pg
 #endif
 
 /*
- * Try to update any existing write request, or create one if there is none.
- * In order to match, the request's credentials must match those of
- * the calling process.
+ * Search for an existing write request, and attempt to update
+ * it to reflect a new dirty region on a given page.
  *
- * Note: Should always be called with the Page Lock held!
+ * If the attempt fails, then the existing request is flushed out
+ * to disk.
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
-                struct page *page, unsigned int offset, unsigned int bytes)
+static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
+                struct page *page,
+                unsigned int offset,
+                unsigned int bytes)
 {
-        struct address_space *mapping = page->mapping;
-        struct inode *inode = mapping->host;
-        struct nfs_page *req, *new = NULL;
-        pgoff_t rqend, end;
+        struct nfs_page *req;
+        unsigned int rqend;
+        unsigned int end;
+        int error;
+
+        if (!PagePrivate(page))
+                return NULL;
 
         end = offset + bytes;
+        spin_lock(&inode->i_lock);
 
         for (;;) {
-                /* Loop over all inode entries and see if we find
-                 * A request for the page we wish to update
-                 */
-                spin_lock(&inode->i_lock);
                 req = nfs_page_find_request_locked(page);
-                if (req) {
-                        if (!nfs_set_page_tag_locked(req)) {
-                                int error;
-
-                                spin_unlock(&inode->i_lock);
-                                error = nfs_wait_on_request(req);
-                                nfs_release_request(req);
-                                if (error < 0) {
-                                        if (new) {
-                                                radix_tree_preload_end();
-                                                nfs_release_request(new);
-                                        }
-                                        return ERR_PTR(error);
-                                }
-                                continue;
-                        }
-                        spin_unlock(&inode->i_lock);
-                        if (new) {
-                                radix_tree_preload_end();
-                                nfs_release_request(new);
-                        }
+                if (req == NULL)
+                        goto out_unlock;
+
+                rqend = req->wb_offset + req->wb_bytes;
+                /*
+                 * Tell the caller to flush out the request if
+                 * the offsets are non-contiguous.
+                 * Note: nfs_flush_incompatible() will already
+                 * have flushed out requests having wrong owners.
+                 */
+                if (!nfs_dirty_request(req)
+                    || offset > rqend
+                    || end < req->wb_offset)
+                        goto out_flushme;
+
+                if (nfs_set_page_tag_locked(req))
                         break;
-                }
 
-                if (new) {
-                        nfs_lock_request_dontget(new);
-                        nfs_inode_add_request(inode, new);
-                        spin_unlock(&inode->i_lock);
-                        radix_tree_preload_end();
-                        req = new;
-                        goto out;
-                }
+                /* The request is locked, so wait and then retry */
                 spin_unlock(&inode->i_lock);
-
-                new = nfs_create_request(ctx, inode, page, offset, bytes);
-                if (IS_ERR(new))
-                        return new;
-                if (radix_tree_preload(GFP_NOFS)) {
-                        nfs_release_request(new);
-                        return ERR_PTR(-ENOMEM);
-                }
-        }
-
-        /* We have a request for our page.
-         * If the creds don't match, or the
-         * page addresses don't match,
-         * tell the caller to wait on the conflicting
-         * request.
-         */
-        rqend = req->wb_offset + req->wb_bytes;
-        if (req->wb_context != ctx
-            || req->wb_page != page
-            || !nfs_dirty_request(req)
-            || offset > rqend || end < req->wb_offset) {
-                nfs_clear_page_tag_locked(req);
-                return ERR_PTR(-EBUSY);
+                error = nfs_wait_on_request(req);
+                nfs_release_request(req);
+                if (error != 0)
+                        goto out_err;
+                spin_lock(&inode->i_lock);
         }
 
         /* Okay, the request matches. Update the region */
         if (offset < req->wb_offset) {
                 req->wb_offset = offset;
                 req->wb_pgbase = offset;
-                req->wb_bytes = max(end, rqend) - req->wb_offset;
-                goto out;
         }
-
         if (end > rqend)
                 req->wb_bytes = end - req->wb_offset;
+        else
+                req->wb_bytes = rqend - req->wb_offset;
+out_unlock:
+        spin_unlock(&inode->i_lock);
+        return req;
+out_flushme:
+        spin_unlock(&inode->i_lock);
+        nfs_release_request(req);
+        error = nfs_wb_page(inode, page);
+out_err:
+        return ERR_PTR(error);
+}
 
+/*
+ * Try to update an existing write request, or create one if there is none.
+ *
+ * Note: Should always be called with the Page Lock held to prevent races
+ * if we have to add a new request. Also assumes that the caller has
+ * already called nfs_flush_incompatible() if necessary.
+ */
+static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
+                struct page *page, unsigned int offset, unsigned int bytes)
+{
+        struct inode *inode = page->mapping->host;
+        struct nfs_page *req;
+        int error;
+
+        req = nfs_try_to_update_request(inode, page, offset, bytes);
+        if (req != NULL)
+                goto out;
+        req = nfs_create_request(ctx, inode, page, offset, bytes);
+        if (IS_ERR(req))
+                goto out;
+        error = nfs_inode_add_request(inode, req);
+        if (error != 0) {
+                nfs_release_request(req);
+                req = ERR_PTR(error);
+        }
 out:
         return req;
 }
 
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
+                unsigned int offset, unsigned int count)
+{
+        struct nfs_page *req;
+
+        req = nfs_setup_write_request(ctx, page, offset, count);
+        if (IS_ERR(req))
+                return PTR_ERR(req);
+        /* Update file length */
+        nfs_grow_file(page, offset, count);
+        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+        nfs_clear_page_tag_locked(req);
+        return 0;
+}
+
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = nfs_file_open_context(file);