Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	288
1 files changed, 201 insertions, 87 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9449b6835509..3f5225404c97 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -63,6 +63,7 @@
 #include <linux/smp_lock.h>
 
 #include "delegation.h"
+#include "iostat.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
@@ -76,20 +77,21 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
					    struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
-static void nfs_writeback_done_partial(struct nfs_write_data *, int);
-static void nfs_writeback_done_full(struct nfs_write_data *, int);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
			    unsigned int npages, int how);
+static const struct rpc_call_ops nfs_write_partial_ops;
+static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_commit_ops;
 
 static kmem_cache_t *nfs_wdata_cachep;
-mempool_t *nfs_wdata_mempool;
+static mempool_t *nfs_wdata_mempool;
 static mempool_t *nfs_commit_mempool;
 
 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
 
-static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
 {
	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
 
@@ -100,11 +102,39 @@ static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
			p->pagevec = &p->page_array[0];
		else {
			size_t size = ++pagecount * sizeof(struct page *);
+			p->pagevec = kzalloc(size, GFP_NOFS);
+			if (!p->pagevec) {
+				mempool_free(p, nfs_commit_mempool);
+				p = NULL;
+			}
+		}
+	}
+	return p;
+}
+
+void nfs_commit_free(struct nfs_write_data *p)
+{
+	if (p && (p->pagevec != &p->page_array[0]))
+		kfree(p->pagevec);
+	mempool_free(p, nfs_commit_mempool);
+}
+
+struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+{
+	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
+
+	if (p) {
+		memset(p, 0, sizeof(*p));
+		INIT_LIST_HEAD(&p->pages);
+		if (pagecount < NFS_PAGEVEC_SIZE)
+			p->pagevec = &p->page_array[0];
+		else {
+			size_t size = ++pagecount * sizeof(struct page *);
			p->pagevec = kmalloc(size, GFP_NOFS);
			if (p->pagevec) {
				memset(p->pagevec, 0, size);
			} else {
-				mempool_free(p, nfs_commit_mempool);
+				mempool_free(p, nfs_wdata_mempool);
				p = NULL;
			}
		}
@@ -112,11 +142,11 @@ static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
	return p;
 }
 
-static inline void nfs_commit_free(struct nfs_write_data *p)
+void nfs_writedata_free(struct nfs_write_data *p)
 {
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
-	mempool_free(p, nfs_commit_mempool);
+	mempool_free(p, nfs_wdata_mempool);
 }
 
 void nfs_writedata_release(void *wdata)
@@ -136,6 +166,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
	if (i_size >= end)
		return;
+	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
	i_size_write(inode, end);
 }
 
@@ -225,6 +256,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
		wdata->args.pgbase += result;
		written += result;
		count -= result;
+		nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, result);
	} while (count);
	/* Update file length */
	nfs_grow_file(page, offset, written);
@@ -281,6 +313,9 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
	int priority = wb_priority(wbc);
	int err;
 
+	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+
	/*
	 * Note: We need to ensure that we have a reference to the inode
	 *       if we are to do asynchronous writes. If not, waiting
@@ -345,6 +380,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
	struct inode *inode = mapping->host;
	int err;
 
+	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+
	err = generic_writepages(mapping, wbc);
	if (err)
		return err;
@@ -356,6 +393,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
+	nfs_add_stats(inode, NFSIOS_WRITEPAGES, err);
	wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
@@ -391,6 +429,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
+	SetPagePrivate(req->wb_page);
	nfsi->npages++;
	atomic_inc(&req->wb_count);
	return 0;
@@ -407,6 +446,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
	BUG_ON (!NFS_WBACK_BUSY(req));
 
	spin_lock(&nfsi->req_lock);
+	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
@@ -499,8 +539,7 @@ nfs_mark_request_commit(struct nfs_page *req)
  *
  * Interruptible by signals only if mounted with intr flag.
  */
-static int
-nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
+static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages)
 {
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
@@ -513,7 +552,6 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
	else
		idx_end = idx_start + npages - 1;
 
-	spin_lock(&nfsi->req_lock);
	next = idx_start;
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
		if (req->wb_index > idx_end)
@@ -526,15 +564,25 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
		spin_unlock(&nfsi->req_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
+		spin_lock(&nfsi->req_lock);
		if (error < 0)
			return error;
-		spin_lock(&nfsi->req_lock);
		res++;
	}
-	spin_unlock(&nfsi->req_lock);
	return res;
 }
 
+static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int npages)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int ret;
+
+	spin_lock(&nfsi->req_lock);
+	ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
+	spin_unlock(&nfsi->req_lock);
+	return ret;
+}
+
 /*
  * nfs_scan_dirty - Scan an inode for dirty requests
  * @inode: NFS inode to scan
@@ -586,6 +634,11 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st
	}
	return res;
 }
+#else
+static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
+{
+	return 0;
+}
 #endif
 
 static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
@@ -598,6 +651,9 @@ static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
 
	if (!bdi_write_congested(bdi))
		return 0;
+
+	nfs_inc_stats(mapping->host, NFSIOS_CONGESTIONWAIT);
+
	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;
@@ -653,8 +709,11 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
			spin_unlock(&nfsi->req_lock);
			error = nfs_wait_on_request(req);
			nfs_release_request(req);
-			if (error < 0)
+			if (error < 0) {
+				if (new)
+					nfs_release_request(new);
				return ERR_PTR(error);
+			}
			continue;
		}
		spin_unlock(&nfsi->req_lock);
@@ -748,6 +807,8 @@ int nfs_updatepage(struct file *file, struct page *page,
	struct nfs_page *req;
	int status = 0;
 
+	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+
	dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
		file->f_dentry->d_parent->d_name.name,
		file->f_dentry->d_name.name, count,
@@ -857,10 +918,12 @@ static inline int flush_task_priority(int how)
  */
 static void nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
+		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
 {
	struct inode *inode;
+	int flags;
 
	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */
@@ -881,6 +944,9 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
 
+	/* Set up the initial task struct. */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->write_setup(data, how);
 
	data->task.tk_priority = flush_task_priority(how);
@@ -910,7 +976,7 @@ static void nfs_execute_write(struct nfs_write_data *data)
  * Generate multiple small requests to write out a single
  * contiguous dirty area on one page.
  */
-static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
+static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
 {
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
@@ -944,14 +1010,15 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
		list_del_init(&data->pages);
 
		data->pagevec[0] = page;
-		data->complete = nfs_writeback_done_partial;
 
		if (nbytes > wsize) {
-			nfs_write_rpcsetup(req, data, wsize, offset, how);
+			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
+					wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
-			nfs_write_rpcsetup(req, data, nbytes, offset, how);
+			nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
+					nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
@@ -978,16 +1045,13 @@ out_bad:
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
+static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 {
	struct nfs_page *req;
	struct page **pages;
	struct nfs_write_data *data;
	unsigned int count;
 
-	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
-		return nfs_flush_multi(head, inode, how);
-
	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
	if (!data)
		goto out_bad;
@@ -1005,9 +1069,8 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
	}
	req = nfs_list_entry(data->pages.next);
 
-	data->complete = nfs_writeback_done_full;
	/* Set up the argument struct */
-	nfs_write_rpcsetup(req, data, count, 0, how);
+	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 
	nfs_execute_write(data);
	return 0;
@@ -1021,24 +1084,32 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
	return -ENOMEM;
 }
 
-static int
-nfs_flush_list(struct list_head *head, int wpages, int how)
+static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how)
 {
	LIST_HEAD(one_request);
-	struct nfs_page *req;
-	int error = 0;
-	unsigned int pages = 0;
+	int (*flush_one)(struct inode *, struct list_head *, int);
+	struct nfs_page *req;
+	int wpages = NFS_SERVER(inode)->wpages;
+	int wsize = NFS_SERVER(inode)->wsize;
+	int error;
 
-	while (!list_empty(head)) {
-		pages += nfs_coalesce_requests(head, &one_request, wpages);
+	flush_one = nfs_flush_one;
+	if (wsize < PAGE_CACHE_SIZE)
+		flush_one = nfs_flush_multi;
+	/* For single writes, FLUSH_STABLE is more efficient */
+	if (npages <= wpages && npages == NFS_I(inode)->npages
+			&& nfs_list_entry(head->next)->wb_bytes <= wsize)
+		how |= FLUSH_STABLE;
+
+	do {
+		nfs_coalesce_requests(head, &one_request, wpages);
		req = nfs_list_entry(one_request.next);
-		error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
+		error = flush_one(inode, &one_request, how);
		if (error < 0)
-			break;
-	}
-	if (error >= 0)
-		return pages;
-
+			goto out_err;
+	} while (!list_empty(head));
+	return 0;
+out_err:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
@@ -1051,8 +1122,9 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
 /*
  * Handle a write reply that flushed part of a page.
  */
-static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
+static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 {
+	struct nfs_write_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;
 
@@ -1062,11 +1134,14 @@ static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
		req->wb_bytes,
		(long long)req_offset(req));
 
-	if (status < 0) {
+	if (nfs_writeback_done(task, data) != 0)
+		return;
+
+	if (task->tk_status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
-		req->wb_context->error = status;
-		dprintk(", error = %d\n", status);
+		req->wb_context->error = task->tk_status;
+		dprintk(", error = %d\n", task->tk_status);
	} else {
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (data->verf.committed < NFS_FILE_SYNC) {
@@ -1087,6 +1162,11 @@ static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
	nfs_writepage_release(req);
 }
 
+static const struct rpc_call_ops nfs_write_partial_ops = {
+	.rpc_call_done = nfs_writeback_done_partial,
+	.rpc_release = nfs_writedata_release,
+};
+
 /*
  * Handle a write reply that flushes a whole page.
  *
@@ -1094,11 +1174,15 @@ static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
  * writebacks since the page->count is kept > 1 for as long
  * as the page has a write request pending.
  */
-static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
+static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
 {
+	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
	struct page *page;
 
+	if (nfs_writeback_done(task, data) != 0)
+		return;
+
	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
@@ -1111,13 +1195,13 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
			req->wb_bytes,
			(long long)req_offset(req));
 
-		if (status < 0) {
+		if (task->tk_status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
-			req->wb_context->error = status;
+			req->wb_context->error = task->tk_status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
-			dprintk(", error = %d\n", status);
+			dprintk(", error = %d\n", task->tk_status);
			goto next;
		}
		end_page_writeback(page);
@@ -1139,18 +1223,30 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
	}
 }
 
+static const struct rpc_call_ops nfs_write_full_ops = {
+	.rpc_call_done = nfs_writeback_done_full,
+	.rpc_release = nfs_writedata_release,
+};
+
+
 /*
  * This function is called when the WRITE call is complete.
  */
-void nfs_writeback_done(struct rpc_task *task, void *calldata)
+int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 {
-	struct nfs_write_data *data = calldata;
	struct nfs_writeargs *argp = &data->args;
	struct nfs_writeres *resp = &data->res;
+	int status;
 
	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);
 
+	/* Call the NFS version-specific code */
+	status = NFS_PROTO(data->inode)->write_done(task, data);
+	if (status != 0)
+		return status;
+	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
@@ -1176,6 +1272,8 @@ void nfs_writeback_done(struct rpc_task *task, void *calldata)
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;
 
+		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
@@ -1191,7 +1289,7 @@ void nfs_writeback_done(struct rpc_task *task, void *calldata)
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
-			return;
+			return -EAGAIN;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING
@@ -1202,11 +1300,7 @@ void nfs_writeback_done(struct rpc_task *task, void *calldata)
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}
-
-	/*
-	 * Process the nfs_page list
-	 */
-	data->complete(data, task->tk_status);
+	return 0;
 }
 
 
@@ -1220,10 +1314,12 @@ void nfs_commit_release(void *wdata)
  * Set up the argument/result storage required for the RPC call.
  */
 static void nfs_commit_rpcsetup(struct list_head *head,
-		struct nfs_write_data *data, int how)
+		struct nfs_write_data *data,
+		int how)
 {
	struct nfs_page *first;
	struct inode *inode;
+	int flags;
 
	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */
@@ -1243,7 +1339,10 @@ static void nfs_commit_rpcsetup(struct list_head *head,
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
 
+	/* Set up the initial task struct. */
+	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
	NFS_PROTO(inode)->commit_setup(data, how);
 
	data->task.tk_priority = flush_task_priority(how);
@@ -1284,7 +1383,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 /*
  * COMMIT call returned
  */
-void nfs_commit_done(struct rpc_task *task, void *calldata)
+static void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
	struct nfs_write_data *data = calldata;
	struct nfs_page *req;
@@ -1293,6 +1392,10 @@ void nfs_commit_done(struct rpc_task *task, void *calldata)
	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);
 
+	/* Call the NFS version-specific code */
+	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
+		return;
+
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
@@ -1326,6 +1429,16 @@ void nfs_commit_done(struct rpc_task *task, void *calldata)
	}
	sub_page_state(nr_unstable,res);
 }
+
+static const struct rpc_call_ops nfs_commit_ops = {
+	.rpc_call_done = nfs_commit_done,
+	.rpc_release = nfs_commit_release,
+};
+#else
+static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+{
+	return 0;
+}
 #endif
 
 static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
@@ -1333,24 +1446,16 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
 {
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
-	int res,
-	    error = 0;
+	int res;
 
	spin_lock(&nfsi->req_lock);
	res = nfs_scan_dirty(inode, &head, idx_start, npages);
	spin_unlock(&nfsi->req_lock);
	if (res) {
-		struct nfs_server *server = NFS_SERVER(inode);
-
-		/* For single writes, FLUSH_STABLE is more efficient */
-		if (res == nfsi->npages && nfsi->npages <= server->wpages) {
-			if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
-				how |= FLUSH_STABLE;
-		}
-		error = nfs_flush_list(&head, server->wpages, how);
+		int error = nfs_flush_list(inode, &head, res, how);
+		if (error < 0)
+			return error;
	}
-	if (error < 0)
-		return error;
	return res;
 }
 
@@ -1359,14 +1464,13 @@ int nfs_commit_inode(struct inode *inode, int how)
 {
	struct nfs_inode *nfsi = NFS_I(inode);
	LIST_HEAD(head);
-	int res,
-	    error = 0;
+	int res;
 
	spin_lock(&nfsi->req_lock);
	res = nfs_scan_commit(inode, &head, 0, 0);
	spin_unlock(&nfsi->req_lock);
	if (res) {
-		error = nfs_commit_list(inode, &head, how);
+		int error = nfs_commit_list(inode, &head, how);
		if (error < 0)
			return error;
	}
@@ -1374,28 +1478,38 @@ int nfs_commit_inode(struct inode *inode, int how)
 }
 #endif
 
-int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
+int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
		unsigned int npages, int how)
 {
+	struct nfs_inode *nfsi = NFS_I(inode);
+	LIST_HEAD(head);
	int nocommit = how & FLUSH_NOCOMMIT;
-	int wait = how & FLUSH_WAIT;
-	int error;
-
-	how &= ~(FLUSH_WAIT|FLUSH_NOCOMMIT);
+	int pages, ret;
 
+	how &= ~FLUSH_NOCOMMIT;
+	spin_lock(&nfsi->req_lock);
	do {
-		if (wait) {
-			error = nfs_wait_on_requests(inode, idx_start, npages);
-			if (error != 0)
-				continue;
-		}
-		error = nfs_flush_inode(inode, idx_start, npages, how);
-		if (error != 0)
+		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
+		if (ret != 0)
			continue;
-		if (!nocommit)
-			error = nfs_commit_inode(inode, how);
-	} while (error > 0);
-	return error;
+		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
+		if (pages != 0) {
+			spin_unlock(&nfsi->req_lock);
+			ret = nfs_flush_list(inode, &head, pages, how);
+			spin_lock(&nfsi->req_lock);
+			continue;
+		}
+		if (nocommit)
+			break;
+		pages = nfs_scan_commit(inode, &head, 0, 0);
+		if (pages == 0)
+			break;
+		spin_unlock(&nfsi->req_lock);
+		ret = nfs_commit_list(inode, &head, how);
+		spin_lock(&nfsi->req_lock);
+	} while (ret >= 0);
+	spin_unlock(&nfsi->req_lock);
+	return ret;
 }
 
 int nfs_init_writepagecache(void)