author     Peng Tao <tao.peng@primarydata.com>                 2015-12-05 03:20:43 -0500
committer  Trond Myklebust <trond.myklebust@primarydata.com>   2015-12-28 14:32:38 -0500
commit     d6c843b96e1cb5199147e3281a724e3c0b69a9ab (patch)
tree       73d556f77eb68743a0752e150c493bd4062cad59 /fs/nfs/write.c
parent     0bcbf039f6b2bcefe4f5dada76079080edf9ecd0 (diff)
nfs: only remove page from mapping if launder_page fails
Instead of dropping pages whenever a write fails, only do so when we get a fatal failure during launder_page writeback.

Signed-off-by: Peng Tao <tao.peng@primarydata.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
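Note that the diff shown here is limited to fs/nfs/write.c, so the callers that choose between the two behaviours are not part of this page. As a rough sketch of how the split is expected to be consumed (the wrapper names and their placement are assumptions, not taken from this diff), only the ->launder_page() path would ask for the page-dropping behaviour:

/* Sketch only: assumed companion wrappers, not part of this diff. */
static inline int nfs_wb_page(struct inode *inode, struct page *page)
{
        /* Regular writeback and read paths keep the page around on failure. */
        return nfs_wb_single_page(inode, page, false);
}

static inline int nfs_wb_launder_page(struct inode *inode, struct page *page)
{
        /* ->launder_page(): a fatal error may drop the page from the mapping. */
        return nfs_wb_single_page(inode, page, true);
}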
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--    fs/nfs/write.c    39
1 file changed, 23 insertions(+), 16 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9dafb08ddae5..4d254232d728 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -559,7 +559,8 @@ static void nfs_write_error_remove_page(struct nfs_page *req)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-                                struct page *page, bool nonblock)
+                                struct page *page, bool nonblock,
+                                bool launder)
 {
         struct nfs_page *req;
         int ret = 0;
@@ -578,17 +579,19 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
         if (!nfs_pageio_add_request(pgio, req)) {
                 ret = pgio->pg_error;
                 /*
-                 * Remove the problematic req upon fatal errors,
-                 * while other dirty pages can still be around
-                 * until they get flushed.
+                 * Remove the problematic req upon fatal errors
+                 * in launder case, while other dirty pages can
+                 * still be around until they get flushed.
                  */
                 if (nfs_error_is_fatal(ret)) {
                         nfs_context_set_write_error(req->wb_context, ret);
-                        nfs_write_error_remove_page(req);
-                } else {
-                        nfs_redirty_request(req);
-                        ret = -EAGAIN;
+                        if (launder) {
+                                nfs_write_error_remove_page(req);
+                                goto out;
+                        }
                 }
+                nfs_redirty_request(req);
+                ret = -EAGAIN;
         } else
                 nfs_add_stats(page_file_mapping(page)->host,
                                 NFSIOS_WRITEPAGES, 1);
@@ -596,12 +599,14 @@ out:
         return ret;
 }
 
-static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
+static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+                            struct nfs_pageio_descriptor *pgio, bool launder)
 {
         int ret;
 
         nfs_pageio_cond_complete(pgio, page_file_index(page));
-        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE,
+                                   launder);
         if (ret == -EAGAIN) {
                 redirty_page_for_writepage(wbc, page);
                 ret = 0;
@@ -612,7 +617,9 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 /*
  * Write an mmapped page to the server.
  */
-static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+static int nfs_writepage_locked(struct page *page,
+                                struct writeback_control *wbc,
+                                bool launder)
 {
         struct nfs_pageio_descriptor pgio;
         struct inode *inode = page_file_mapping(page)->host;
@@ -621,7 +628,7 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
                                 false, &nfs_async_write_completion_ops);
-        err = nfs_do_writepage(page, wbc, &pgio);
+        err = nfs_do_writepage(page, wbc, &pgio, launder);
         nfs_pageio_complete(&pgio);
         if (err < 0)
                 return err;
@@ -634,7 +641,7 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
         int ret;
 
-        ret = nfs_writepage_locked(page, wbc);
+        ret = nfs_writepage_locked(page, wbc, false);
         unlock_page(page);
         return ret;
 }
@@ -643,7 +650,7 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
 {
         int ret;
 
-        ret = nfs_do_writepage(page, wbc, data);
+        ret = nfs_do_writepage(page, wbc, data, false);
         unlock_page(page);
         return ret;
 }
@@ -1931,7 +1938,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 /*
  * Write back all requests on one page - we do this before reading it.
  */
-int nfs_wb_page(struct inode *inode, struct page *page)
+int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder)
 {
         loff_t range_start = page_file_offset(page);
         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
@@ -1948,7 +1955,7 @@ int nfs_wb_page(struct inode *inode, struct page *page)
         for (;;) {
                 wait_on_page_writeback(page);
                 if (clear_page_dirty_for_io(page)) {
-                        ret = nfs_writepage_locked(page, &wbc);
+                        ret = nfs_writepage_locked(page, &wbc, launder);
                         if (ret < 0)
                                 goto out_error;
                         continue;
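Read together, the hunks above thread a single launder flag from nfs_wb_single_page() down through nfs_writepage_locked(), nfs_do_writepage() and nfs_page_async_flush(); the regular writepage/writepages entry points always pass false. As a reading aid only, the new error policy from the second hunk condenses to the fragment below (identifiers as in the diff):

        /* Before this patch any fatal pageio error removed the page from the
         * mapping; now only the launder case does, and every other path just
         * redirties the page so it can be flushed again later.
         */
        if (!nfs_pageio_add_request(pgio, req)) {
                ret = pgio->pg_error;
                if (nfs_error_is_fatal(ret)) {
                        nfs_context_set_write_error(req->wb_context, ret);
                        if (launder) {
                                nfs_write_error_remove_page(req);
                                goto out;       /* drop the page, report the error */
                        }
                }
                nfs_redirty_request(req);       /* keep the page dirty */
                ret = -EAGAIN;                  /* caller will retry the write */
        }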