aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeng Tao <tao.peng@primarydata.com>2015-12-05 02:57:31 -0500
committerTrond Myklebust <trond.myklebust@primarydata.com>2015-12-28 14:32:37 -0500
commit0bcbf039f6b2bcefe4f5dada76079080edf9ecd0 (patch)
tree8edf8f581dcd12258d7ab84898261a3a3d0ab2ef
parent2bff2288579f1e4af2f05a7f7443c85b7766d5ac (diff)
nfs: handle request add failure properly
When we fail to queue a read page to the IO descriptor, we need to clean it up, otherwise it hangs around and prevents the nfs module from being removed. When we fail to queue a write page to the IO descriptor, we need to clean it up and also save the failure status to the open context. Then at file close, we can try to write the pages back again and drop a page if it fails to write back in .launder_page, which will be done in the next patch. Signed-off-by: Peng Tao <tao.peng@primarydata.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
-rw-r--r--fs/nfs/inode.c6
-rw-r--r--fs/nfs/internal.h14
-rw-r--r--fs/nfs/pnfs.c15
-rw-r--r--fs/nfs/read.c41
-rw-r--r--fs/nfs/write.c22
5 files changed, 67 insertions, 31 deletions
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c7e8b87da5b2..74fb1223c2f5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -912,6 +912,12 @@ void nfs_file_clear_open_context(struct file *filp)
912 if (ctx) { 912 if (ctx) {
913 struct inode *inode = d_inode(ctx->dentry); 913 struct inode *inode = d_inode(ctx->dentry);
914 914
915 /*
916 * We saw a fatal error on write earlier. Try to write back
917 * every page again.
918 */
919 if (ctx->error < 0)
920 invalidate_inode_pages2(inode->i_mapping);
915 filp->private_data = NULL; 921 filp->private_data = NULL;
916 spin_lock(&inode->i_lock); 922 spin_lock(&inode->i_lock);
917 list_move_tail(&ctx->list, &NFS_I(inode)->open_files); 923 list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 313d55402238..68f773dc226e 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -711,3 +711,17 @@ static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
711 return 0; 711 return 0;
712} 712}
713#endif 713#endif
714
715static inline bool nfs_error_is_fatal(int err)
716{
717 switch (err) {
718 case -ERESTARTSYS:
719 case -EIO:
720 case -ENOSPC:
721 case -EROFS:
722 case -E2BIG:
723 return true;
724 default:
725 return false;
726 }
727}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0fb3552ccfbe..580207bc52a5 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -904,18 +904,9 @@ send_layoutget(struct pnfs_layout_hdr *lo,
904 lseg = nfs4_proc_layoutget(lgp, gfp_flags); 904 lseg = nfs4_proc_layoutget(lgp, gfp_flags);
905 } while (lseg == ERR_PTR(-EAGAIN)); 905 } while (lseg == ERR_PTR(-EAGAIN));
906 906
907 if (IS_ERR(lseg)) { 907 if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
908 switch (PTR_ERR(lseg)) { 908 lseg = NULL;
909 case -ERESTARTSYS: 909 else
910 case -EIO:
911 case -ENOSPC:
912 case -EROFS:
913 case -E2BIG:
914 break;
915 default:
916 return NULL;
917 }
918 } else
919 pnfs_layout_clear_fail_bit(lo, 910 pnfs_layout_clear_fail_bit(lo,
920 pnfs_iomode_to_fail_bit(range->iomode)); 911 pnfs_iomode_to_fail_bit(range->iomode));
921 912
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0bb580174cb3..eb31e23e7def 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -85,6 +85,23 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
85} 85}
86EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 86EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
87 87
88static void nfs_readpage_release(struct nfs_page *req)
89{
90 struct inode *inode = d_inode(req->wb_context->dentry);
91
92 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
93 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
94 (long long)req_offset(req));
95
96 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
97 if (PageUptodate(req->wb_page))
98 nfs_readpage_to_fscache(inode, req->wb_page, 0);
99
100 unlock_page(req->wb_page);
101 }
102 nfs_release_request(req);
103}
104
88int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, 105int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
89 struct page *page) 106 struct page *page)
90{ 107{
@@ -106,7 +123,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
106 123
107 nfs_pageio_init_read(&pgio, inode, false, 124 nfs_pageio_init_read(&pgio, inode, false,
108 &nfs_async_read_completion_ops); 125 &nfs_async_read_completion_ops);
109 nfs_pageio_add_request(&pgio, new); 126 if (!nfs_pageio_add_request(&pgio, new)) {
127 nfs_list_remove_request(new);
128 nfs_readpage_release(new);
129 }
110 nfs_pageio_complete(&pgio); 130 nfs_pageio_complete(&pgio);
111 131
112 /* It doesn't make sense to do mirrored reads! */ 132 /* It doesn't make sense to do mirrored reads! */
@@ -118,23 +138,6 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
118 return pgio.pg_error < 0 ? pgio.pg_error : 0; 138 return pgio.pg_error < 0 ? pgio.pg_error : 0;
119} 139}
120 140
121static void nfs_readpage_release(struct nfs_page *req)
122{
123 struct inode *inode = d_inode(req->wb_context->dentry);
124
125 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
126 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
127 (long long)req_offset(req));
128
129 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
130 if (PageUptodate(req->wb_page))
131 nfs_readpage_to_fscache(inode, req->wb_page, 0);
132
133 unlock_page(req->wb_page);
134 }
135 nfs_release_request(req);
136}
137
138static void nfs_page_group_set_uptodate(struct nfs_page *req) 141static void nfs_page_group_set_uptodate(struct nfs_page *req)
139{ 142{
140 if (nfs_page_group_sync_on_bit(req, PG_UPTODATE)) 143 if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
@@ -361,6 +364,8 @@ readpage_async_filler(void *data, struct page *page)
361 if (len < PAGE_CACHE_SIZE) 364 if (len < PAGE_CACHE_SIZE)
362 zero_user_segment(page, len, PAGE_CACHE_SIZE); 365 zero_user_segment(page, len, PAGE_CACHE_SIZE);
363 if (!nfs_pageio_add_request(desc->pgio, new)) { 366 if (!nfs_pageio_add_request(desc->pgio, new)) {
367 nfs_list_remove_request(new);
368 nfs_readpage_release(new);
364 error = desc->pgio->pg_error; 369 error = desc->pgio->pg_error;
365 goto out_unlock; 370 goto out_unlock;
366 } 371 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7b9316406930..9dafb08ddae5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -545,6 +545,15 @@ try_again:
545 return head; 545 return head;
546} 546}
547 547
548static void nfs_write_error_remove_page(struct nfs_page *req)
549{
550 nfs_unlock_request(req);
551 nfs_end_page_writeback(req);
552 nfs_release_request(req);
553 generic_error_remove_page(page_file_mapping(req->wb_page),
554 req->wb_page);
555}
556
548/* 557/*
549 * Find an associated nfs write request, and prepare to flush it out 558 * Find an associated nfs write request, and prepare to flush it out
550 * May return an error if the user signalled nfs_wait_on_request(). 559 * May return an error if the user signalled nfs_wait_on_request().
@@ -567,8 +576,19 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
567 576
568 ret = 0; 577 ret = 0;
569 if (!nfs_pageio_add_request(pgio, req)) { 578 if (!nfs_pageio_add_request(pgio, req)) {
570 nfs_redirty_request(req);
571 ret = pgio->pg_error; 579 ret = pgio->pg_error;
580 /*
581 * Remove the problematic req upon fatal errors,
582 * while other dirty pages can still be around
583 * until they get flushed.
584 */
585 if (nfs_error_is_fatal(ret)) {
586 nfs_context_set_write_error(req->wb_context, ret);
587 nfs_write_error_remove_page(req);
588 } else {
589 nfs_redirty_request(req);
590 ret = -EAGAIN;
591 }
572 } else 592 } else
573 nfs_add_stats(page_file_mapping(page)->host, 593 nfs_add_stats(page_file_mapping(page)->host,
574 NFSIOS_WRITEPAGES, 1); 594 NFSIOS_WRITEPAGES, 1);