aboutsummaryrefslogtreecommitdiffstats
path: root/fs/nfs/write.c
diff options
context:
space:
mode:
authorTrond Myklebust <trond.myklebust@primarydata.com>2017-06-20 19:35:37 -0400
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2017-07-13 15:58:05 -0400
commit919e3bd9a87593520a2c5dfda27bd3e6599852ed (patch)
tree94bbb463d528d3ecf32967d067a2781b776709bc /fs/nfs/write.c
parentb5973a8c1ccf375c9ab9e2428e1185e3f799af06 (diff)
NFS: Ensure we commit after writeback is complete
If the page cache is being flushed, then we want to ensure that we do start a commit once the pages are done being flushed. If we just wait until all I/O is done to that file, we can end up livelocking until the balance_dirty_pages() mechanism puts its foot down and forces I/O to stop. So instead we do more or less the same thing that O_DIRECT does, and set up a counter to tell us when the flush is done. Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com> Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--fs/nfs/write.c57
1 file changed, 57 insertions, 0 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index db7ba542559e..051197cb9195 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -40,6 +40,12 @@
40#define MIN_POOL_WRITE (32) 40#define MIN_POOL_WRITE (32)
41#define MIN_POOL_COMMIT (4) 41#define MIN_POOL_COMMIT (4)
42 42
43struct nfs_io_completion {
44 void (*complete)(void *data);
45 void *data;
46 struct kref refcount;
47};
48
43/* 49/*
44 * Local function declarations 50 * Local function declarations
45 */ 51 */
@@ -108,6 +114,39 @@ static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
108 mempool_free(hdr, nfs_wdata_mempool); 114 mempool_free(hdr, nfs_wdata_mempool);
109} 115}
110 116
117static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
118{
119 return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
120}
121
122static void nfs_io_completion_init(struct nfs_io_completion *ioc,
123 void (*complete)(void *), void *data)
124{
125 ioc->complete = complete;
126 ioc->data = data;
127 kref_init(&ioc->refcount);
128}
129
130static void nfs_io_completion_release(struct kref *kref)
131{
132 struct nfs_io_completion *ioc = container_of(kref,
133 struct nfs_io_completion, refcount);
134 ioc->complete(ioc->data);
135 kfree(ioc);
136}
137
138static void nfs_io_completion_get(struct nfs_io_completion *ioc)
139{
140 if (ioc != NULL)
141 kref_get(&ioc->refcount);
142}
143
144static void nfs_io_completion_put(struct nfs_io_completion *ioc)
145{
146 if (ioc != NULL)
147 kref_put(&ioc->refcount, nfs_io_completion_release);
148}
149
111static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) 150static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
112{ 151{
113 ctx->error = error; 152 ctx->error = error;
@@ -681,18 +720,29 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
681 return ret; 720 return ret;
682} 721}
683 722
723static void nfs_io_completion_commit(void *inode)
724{
725 nfs_commit_inode(inode, 0);
726}
727
684int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) 728int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
685{ 729{
686 struct inode *inode = mapping->host; 730 struct inode *inode = mapping->host;
687 struct nfs_pageio_descriptor pgio; 731 struct nfs_pageio_descriptor pgio;
732 struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
688 int err; 733 int err;
689 734
690 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); 735 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
691 736
737 if (ioc)
738 nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);
739
692 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, 740 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
693 &nfs_async_write_completion_ops); 741 &nfs_async_write_completion_ops);
742 pgio.pg_io_completion = ioc;
694 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 743 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
695 nfs_pageio_complete(&pgio); 744 nfs_pageio_complete(&pgio);
745 nfs_io_completion_put(ioc);
696 746
697 if (err < 0) 747 if (err < 0)
698 goto out_err; 748 goto out_err;
@@ -940,6 +990,11 @@ int nfs_write_need_commit(struct nfs_pgio_header *hdr)
940 return hdr->verf.committed != NFS_FILE_SYNC; 990 return hdr->verf.committed != NFS_FILE_SYNC;
941} 991}
942 992
993static void nfs_async_write_init(struct nfs_pgio_header *hdr)
994{
995 nfs_io_completion_get(hdr->io_completion);
996}
997
943static void nfs_write_completion(struct nfs_pgio_header *hdr) 998static void nfs_write_completion(struct nfs_pgio_header *hdr)
944{ 999{
945 struct nfs_commit_info cinfo; 1000 struct nfs_commit_info cinfo;
@@ -973,6 +1028,7 @@ next:
973 nfs_release_request(req); 1028 nfs_release_request(req);
974 } 1029 }
975out: 1030out:
1031 nfs_io_completion_put(hdr->io_completion);
976 hdr->release(hdr); 1032 hdr->release(hdr);
977} 1033}
978 1034
@@ -1378,6 +1434,7 @@ static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
1378} 1434}
1379 1435
1380static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { 1436static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1437 .init_hdr = nfs_async_write_init,
1381 .error_cleanup = nfs_async_write_error, 1438 .error_cleanup = nfs_async_write_error,
1382 .completion = nfs_write_completion, 1439 .completion = nfs_write_completion,
1383 .reschedule_io = nfs_async_write_reschedule_io, 1440 .reschedule_io = nfs_async_write_reschedule_io,