author    Fred Isaman <iisaman@netapp.com>    2012-04-20 14:47:47 -0400
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2012-04-27 14:10:37 -0400
commit    6c75dc0d498caa402fb17b1bf769835a9db875c8 (patch)
tree      b367bb2428c011f20b8fd47a6dd0b8603ee136ba /fs/nfs/write.c
parent    4db6e0b74c0f6dfc2f9c0690e8df512e3b635983 (diff)
NFS: merge _full and _partial write rpc_ops
Decouple nfs_pgio_header and nfs_write_data, and have (possibly multiple) nfs_write_datas each take a refcount on the nfs_pgio_header.

For the moment this keeps nfs_write_header as a way to preallocate a single nfs_write_data together with the nfs_pgio_header. The code doesn't need this, and would be prettier without it, but given the amount of churn I am already introducing I didn't want to play with tuning new mempools.

This also fixes a bug in pnfs_ld_handle_write_error: in the case of desc->pg_bsize < PAGE_CACHE_SIZE, the pages list was empty, causing the replay attempt to do nothing.

Signed-off-by: Fred Isaman <iisaman@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--   fs/nfs/write.c   383
1 file changed, 174 insertions(+), 209 deletions(-)
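As context for the diff below, here is a minimal userspace sketch of the refcounting scheme the commit message describes: one nfs_pgio_header per flush, any number of write-data pieces each holding a reference, and the completion callback running once the last reference is dropped. The types and helper names are simplified stand-ins for illustration only, not the actual kernel structures or API.

/*
 * Sketch of the header/write-data refcounting introduced by this patch.
 * All names here are hypothetical; the kernel code uses atomic_t and the
 * real nfs_pgio_header / nfs_write_data structures.
 */
#include <stdio.h>
#include <stdlib.h>

struct pgio_header {
	int refcnt;                        /* kernel: atomic_t refcnt */
	void (*release)(struct pgio_header *hdr);
};

struct write_data {
	struct pgio_header *header;        /* back-pointer, holds one reference */
};

static void write_completion(struct pgio_header *hdr)
{
	/* In the kernel this walks hdr->pages and commits/redirties requests. */
	printf("last reference dropped, completing header\n");
	hdr->release(hdr);
}

static struct write_data *writedata_alloc(struct pgio_header *hdr)
{
	struct write_data *data = calloc(1, sizeof(*data));

	if (data) {
		data->header = hdr;
		hdr->refcnt++;             /* kernel: atomic_inc(&hdr->refcnt) */
	}
	return data;
}

static void writedata_release(struct write_data *data)
{
	struct pgio_header *hdr = data->header;

	free(data);
	if (--hdr->refcnt == 0)            /* kernel: atomic_dec_and_test() */
		write_completion(hdr);
}

static void header_free(struct pgio_header *hdr)
{
	free(hdr);
}

int main(void)
{
	struct pgio_header *hdr = calloc(1, sizeof(*hdr));
	struct write_data *a, *b;

	hdr->release = header_free;
	hdr->refcnt++;                     /* issuer's own reference */
	a = writedata_alloc(hdr);          /* e.g. one RPC piece per wsize chunk */
	b = writedata_alloc(hdr);

	writedata_release(a);              /* RPC completions, in any order */
	writedata_release(b);
	if (--hdr->refcnt == 0)            /* issuer drops its reference last */
		write_completion(hdr);
	return 0;
}

In the patch, nfs_generic_pg_writepages() plays the role of main(): it takes its own reference on the header before generating the RPC pieces, so nfs_write_completion() cannot run until every piece has been set up and released.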
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 2efae049b4f0..076075eb676c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -42,8 +42,7 @@
 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
 				  struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
-static const struct rpc_call_ops nfs_write_partial_ops;
-static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_write_common_ops;
 static const struct rpc_call_ops nfs_commit_ops;
 
 static struct kmem_cache *nfs_wdata_cachep;
@@ -69,26 +68,47 @@ void nfs_commit_free(struct nfs_commit_data *p)
 }
 EXPORT_SYMBOL_GPL(nfs_commit_free);
 
-struct nfs_write_header *nfs_writehdr_alloc(unsigned int pagecount)
+struct nfs_write_header *nfs_writehdr_alloc(void)
 {
 	struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
 
 	if (p) {
 		struct nfs_pgio_header *hdr = &p->header;
-		struct nfs_write_data *data = &p->rpc_data;
 
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&hdr->pages);
-		INIT_LIST_HEAD(&data->list);
-		data->header = hdr;
-		if (!nfs_pgarray_set(&data->pages, pagecount)) {
-			mempool_free(p, nfs_wdata_mempool);
-			p = NULL;
-		}
+		INIT_LIST_HEAD(&hdr->rpc_list);
+		spin_lock_init(&hdr->lock);
+		atomic_set(&hdr->refcnt, 0);
 	}
 	return p;
 }
 
+struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
+					   unsigned int pagecount)
+{
+	struct nfs_write_data *data, *prealloc;
+
+	prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
+	if (prealloc->header == NULL)
+		data = prealloc;
+	else
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out;
+
+	if (nfs_pgarray_set(&data->pages, pagecount)) {
+		data->header = hdr;
+		atomic_inc(&hdr->refcnt);
+	} else {
+		if (data != prealloc)
+			kfree(data);
+		data = NULL;
+	}
+out:
+	return data;
+}
+
 void nfs_writehdr_free(struct nfs_pgio_header *hdr)
 {
 	struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
@@ -97,10 +117,18 @@ void nfs_writehdr_free(struct nfs_pgio_header *hdr)
 
 void nfs_writedata_release(struct nfs_write_data *wdata)
 {
+	struct nfs_pgio_header *hdr = wdata->header;
+	struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
+
 	put_nfs_open_context(wdata->args.context);
 	if (wdata->pages.pagevec != wdata->pages.page_array)
 		kfree(wdata->pages.pagevec);
-	nfs_writehdr_free(wdata->header);
+	if (wdata != &write_header->rpc_data)
+		kfree(wdata);
+	else
+		wdata->header = NULL;
+	if (atomic_dec_and_test(&hdr->refcnt))
+		nfs_write_completion(hdr);
 }
 
 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -511,20 +539,6 @@ int nfs_write_need_commit(struct nfs_write_data *data)
 	return data->verf.committed != NFS_FILE_SYNC;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-		nfs_mark_request_commit(req, data->header->lseg);
-		return 1;
-	}
-	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-		nfs_mark_request_dirty(req);
-		return 1;
-	}
-	return 0;
-}
 #else
 static void
 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
@@ -542,13 +556,43 @@ int nfs_write_need_commit(struct nfs_write_data *data)
 	return 0;
 }
 
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	return 0;
-}
-#endif
+#endif
+
+void nfs_write_completion(struct nfs_pgio_header *hdr)
+{
+	unsigned long bytes = 0;
+
+	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+		goto out;
+	while (!list_empty(&hdr->pages)) {
+		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+		struct page *page = req->wb_page;
+
+		bytes += req->wb_bytes;
+		nfs_list_remove_request(req);
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
+		    (hdr->good_bytes < bytes)) {
+			nfs_set_pageerror(page);
+			nfs_context_set_write_error(req->wb_context, hdr->error);
+			goto remove_req;
+		}
+		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+			nfs_mark_request_dirty(req);
+			goto next;
+		}
+		if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+			nfs_mark_request_commit(req, hdr->lseg);
+			goto next;
+		}
+remove_req:
+		nfs_inode_remove_request(req);
+next:
+		nfs_unlock_request(req);
+		nfs_end_page_writeback(page);
+	}
+out:
+	hdr->release(hdr);
+}
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
@@ -813,17 +857,6 @@ int nfs_updatepage(struct file *file, struct page *page,
 	return status;
 }
 
-static void nfs_writepage_release(struct nfs_page *req,
-				  struct nfs_write_data *data)
-{
-	struct page *page = req->wb_page;
-
-	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
-		nfs_inode_remove_request(req);
-	nfs_unlock_request(req);
-	nfs_end_page_writeback(page);
-}
-
 static int flush_task_priority(int how)
 {
 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -890,22 +923,16 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
 /*
  * Set up the argument/result storage required for the RPC call.
  */
-static void nfs_write_rpcsetup(struct nfs_page *req,
-		struct nfs_write_data *data,
+static void nfs_write_rpcsetup(struct nfs_write_data *data,
 		unsigned int count, unsigned int offset,
 		int how)
 {
-	struct nfs_pgio_header *hdr = data->header;
-	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct nfs_page *req = data->header->req;
 
 	/* Set up the RPC argument and reply structs
 	 * NB: take care not to mess about with data->commit et al. */
 
-	hdr->req = req;
-	hdr->inode = inode = req->wb_context->dentry->d_inode;
-	hdr->cred = req->wb_context->cred;
-
-	data->args.fh = NFS_FH(inode);
+	data->args.fh = NFS_FH(data->header->inode);
 	data->args.offset = req_offset(req) + offset;
 	/* pnfs_set_layoutcommit needs this */
 	data->mds_offset = data->args.offset;
@@ -919,7 +946,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 	case 0:
 		break;
 	case FLUSH_COND_STABLE:
-		if (nfs_need_commit(NFS_I(inode)))
+		if (nfs_need_commit(NFS_I(data->header->inode)))
 			break;
 	default:
 		data->args.stable = NFS_FILE_SYNC;
@@ -950,7 +977,7 @@ static int nfs_do_multiple_writes(struct list_head *head,
 	while (!list_empty(head)) {
 		int ret2;
 
-		data = list_entry(head->next, struct nfs_write_data, list);
+		data = list_first_entry(head, struct nfs_write_data, list);
 		list_del_init(&data->list);
 
 		ret2 = nfs_do_write(data, call_ops, how);
@@ -973,15 +1000,26 @@ static void nfs_redirty_request(struct nfs_page *req)
 	nfs_end_page_writeback(page);
 }
 
+void nfs_async_write_error(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while (!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_redirty_request(req);
+	}
+}
+
 /*
  * Generate multiple small requests to write out a single
  * contiguous dirty area on one page.
  */
-static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
+			   struct nfs_pgio_header *hdr)
 {
-	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+	struct nfs_page *req = hdr->req;
 	struct page *page = req->wb_page;
-	struct nfs_write_header *whdr;
 	struct nfs_write_data *data;
 	size_t wsize = desc->pg_bsize, nbytes;
 	unsigned int offset;
@@ -989,6 +1027,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
 	int ret = 0;
 
 	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &hdr->pages);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
@@ -1001,28 +1040,27 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
 	do {
 		size_t len = min(nbytes, wsize);
 
-		whdr = nfs_writehdr_alloc(1);
-		if (!whdr)
+		data = nfs_writedata_alloc(hdr, 1);
+		if (!data)
 			goto out_bad;
-		data = &whdr->rpc_data;
 		data->pages.pagevec[0] = page;
-		nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
-		list_add(&data->list, res);
+		nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags);
+		list_add(&data->list, &hdr->rpc_list);
 		requests++;
 		nbytes -= len;
 		offset += len;
 	} while (nbytes != 0);
 	atomic_set(&req->wb_complete, requests);
-	desc->pg_rpc_callops = &nfs_write_partial_ops;
+	desc->pg_rpc_callops = &nfs_write_common_ops;
 	return ret;
 
 out_bad:
-	while (!list_empty(res)) {
-		data = list_entry(res->next, struct nfs_write_data, list);
+	while (!list_empty(&hdr->rpc_list)) {
+		data = list_first_entry(&hdr->rpc_list, struct nfs_write_data, list);
 		list_del(&data->list);
 		nfs_writedata_release(data);
 	}
-	nfs_redirty_request(req);
+	nfs_async_write_error(&hdr->pages);
 	return -ENOMEM;
 }
 
@@ -1034,64 +1072,74 @@ out_bad:
  * This is the case if nfs_updatepage detects a conflicting request
  * that has been written but not committed.
  */
-static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
+			 struct nfs_pgio_header *hdr)
 {
 	struct nfs_page *req;
 	struct page **pages;
-	struct nfs_write_header *whdr;
 	struct nfs_write_data *data;
 	struct list_head *head = &desc->pg_list;
 	int ret = 0;
 
-	whdr = nfs_writehdr_alloc(nfs_page_array_len(desc->pg_base,
-						     desc->pg_count));
-	if (!whdr) {
-		while (!list_empty(head)) {
-			req = nfs_list_entry(head->next);
-			nfs_list_remove_request(req);
-			nfs_redirty_request(req);
-		}
+	data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+							   desc->pg_count));
+	if (!data) {
+		nfs_async_write_error(head);
 		ret = -ENOMEM;
 		goto out;
 	}
-	data = &whdr->rpc_data;
+
 	pages = data->pages.pagevec;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_list_add_request(req, &whdr->header.pages);
+		nfs_list_add_request(req, &hdr->pages);
 		*pages++ = req->wb_page;
 	}
-	req = nfs_list_entry(whdr->header.pages.next);
 
 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
 	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
 		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
 
 	/* Set up the argument struct */
-	nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
-	list_add(&data->list, res);
-	desc->pg_rpc_callops = &nfs_write_full_ops;
+	nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags);
+	list_add(&data->list, &hdr->rpc_list);
+	desc->pg_rpc_callops = &nfs_write_common_ops;
 out:
 	return ret;
 }
 
-int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+		      struct nfs_pgio_header *hdr)
 {
 	if (desc->pg_bsize < PAGE_CACHE_SIZE)
-		return nfs_flush_multi(desc, head);
-	return nfs_flush_one(desc, head);
+		return nfs_flush_multi(desc, hdr);
+	return nfs_flush_one(desc, hdr);
 }
 
 static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-	LIST_HEAD(head);
+	struct nfs_write_header *whdr;
+	struct nfs_pgio_header *hdr;
 	int ret;
 
-	ret = nfs_generic_flush(desc, &head);
+	whdr = nfs_writehdr_alloc();
+	if (!whdr) {
+		nfs_async_write_error(&desc->pg_list);
+		return -ENOMEM;
+	}
+	hdr = &whdr->header;
+	nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
+	atomic_inc(&hdr->refcnt);
+	ret = nfs_generic_flush(desc, hdr);
 	if (ret == 0)
-		ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
-					     desc->pg_ioflags);
+		ret = nfs_do_multiple_writes(&hdr->rpc_list,
+					     desc->pg_rpc_callops,
+					     desc->pg_ioflags);
+	else
+		set_bit(NFS_IOHDR_REDO, &hdr->flags);
+	if (atomic_dec_and_test(&hdr->refcnt))
+		nfs_write_completion(hdr);
 	return ret;
 }
 
@@ -1121,62 +1169,6 @@ static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
 	nfs_pageio_init_write_mds(pgio, inode, ioflags);
 }
 
-/*
- * Handle a write reply that flushed part of a page.
- */
-static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
-{
-	struct nfs_write_data *data = calldata;
-
-	dprintk("NFS: %5u write(%s/%lld %d@%lld)",
-		task->tk_pid,
-		data->header->inode->i_sb->s_id,
-		(long long)
-		NFS_FILEID(data->header->inode),
-		data->header->req->wb_bytes,
-		(long long)req_offset(data->header->req));
-
-	nfs_writeback_done(task, data);
-}
-
-static void nfs_writeback_release_partial(void *calldata)
-{
-	struct nfs_write_data *data = calldata;
-	struct nfs_page *req = data->header->req;
-	struct page *page = req->wb_page;
-	int status = data->task.tk_status;
-
-	if (status < 0) {
-		nfs_set_pageerror(page);
-		nfs_context_set_write_error(req->wb_context, status);
-		dprintk(", error = %d\n", status);
-		goto out;
-	}
-
-	if (nfs_write_need_commit(data)) {
-		struct inode *inode = page->mapping->host;
-
-		spin_lock(&inode->i_lock);
-		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
-			/* Do nothing we need to resend the writes */
-		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-			dprintk(" defer commit\n");
-		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
-			set_bit(PG_NEED_RESCHED, &req->wb_flags);
-			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
-			dprintk(" server reboot detected\n");
-		}
-		spin_unlock(&inode->i_lock);
-	} else
-		dprintk(" OK\n");
-
-out:
-	if (atomic_dec_and_test(&req->wb_complete))
-		nfs_writepage_release(req, data);
-	nfs_writedata_release(data);
-}
-
 void nfs_write_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
@@ -1190,12 +1182,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
 	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-static const struct rpc_call_ops nfs_write_partial_ops = {
-	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_writeback_done_partial,
-	.rpc_release = nfs_writeback_release_partial,
-};
-
 /*
  * Handle a write reply that flushes a whole page.
  *
@@ -1203,60 +1189,37 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
  * writebacks since the page->count is kept > 1 for as long
  * as the page has a write request pending.
  */
-static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
 
 	nfs_writeback_done(task, data);
 }
 
-static void nfs_writeback_release_full(void *calldata)
+static void nfs_writeback_release_common(void *calldata)
 {
 	struct nfs_write_data *data = calldata;
 	struct nfs_pgio_header *hdr = data->header;
 	int status = data->task.tk_status;
+	struct nfs_page *req = hdr->req;
 
-	/* Update attributes as result of writeback. */
-	while (!list_empty(&hdr->pages)) {
-		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
-		struct page *page = req->wb_page;
-
-		nfs_list_remove_request(req);
-
-		dprintk("NFS: %5u write (%s/%lld %d@%lld)",
-			data->task.tk_pid,
-			req->wb_context->dentry->d_inode->i_sb->s_id,
-			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
-			req->wb_bytes,
-			(long long)req_offset(req));
-
-		if (status < 0) {
-			nfs_set_pageerror(page);
-			nfs_context_set_write_error(req->wb_context, status);
-			dprintk(", error = %d\n", status);
-			goto remove_request;
-		}
-
-		if (nfs_write_need_commit(data)) {
-			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
-			nfs_mark_request_commit(req, hdr->lseg);
-			dprintk(" marked for commit\n");
-			goto next;
-		}
-		dprintk(" OK\n");
-remove_request:
-		nfs_inode_remove_request(req);
-	next:
-		nfs_unlock_request(req);
-		nfs_end_page_writeback(page);
+	if ((status >= 0) && nfs_write_need_commit(data)) {
+		spin_lock(&hdr->lock);
+		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
+			; /* Do nothing */
+		else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
+			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+		else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+			set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
+		spin_unlock(&hdr->lock);
 	}
 	nfs_writedata_release(data);
 }
 
-static const struct rpc_call_ops nfs_write_full_ops = {
+static const struct rpc_call_ops nfs_write_common_ops = {
 	.rpc_call_prepare = nfs_write_prepare,
-	.rpc_call_done = nfs_writeback_done_full,
-	.rpc_release = nfs_writeback_release_full,
+	.rpc_call_done = nfs_writeback_done_common,
+	.rpc_release = nfs_writeback_release_common,
 };
 
 
@@ -1307,38 +1270,40 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 	}
 	}
 #endif
-	/* Is this a short write? */
-	if (task->tk_status >= 0 && resp->count < argp->count) {
+	if (task->tk_status < 0)
+		nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
+	else if (resp->count < argp->count) {
 		static unsigned long complain;
 
+		/* This a short write! */
 		nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
 
 		/* Has the server at least made some progress? */
-		if (resp->count != 0) {
-			/* Was this an NFSv2 write or an NFSv3 stable write? */
-			if (resp->verf->committed != NFS_UNSTABLE) {
-				/* Resend from where the server left off */
-				data->mds_offset += resp->count;
-				argp->offset += resp->count;
-				argp->pgbase += resp->count;
-				argp->count -= resp->count;
-			} else {
-				/* Resend as a stable write in order to avoid
-				 * headaches in the case of a server crash.
-				 */
-				argp->stable = NFS_FILE_SYNC;
+		if (resp->count == 0) {
+			if (time_before(complain, jiffies)) {
+				printk(KERN_WARNING
+				       "NFS: Server wrote zero bytes, expected %u.\n",
+				       argp->count);
+				complain = jiffies + 300 * HZ;
 			}
-			rpc_restart_call_prepare(task);
+			nfs_set_pgio_error(data->header, -EIO, argp->offset);
+			task->tk_status = -EIO;
 			return;
 		}
-		if (time_before(complain, jiffies)) {
-			printk(KERN_WARNING
-				"NFS: Server wrote zero bytes, expected %u.\n",
-				argp->count);
-			complain = jiffies + 300 * HZ;
+		/* Was this an NFSv2 write or an NFSv3 stable write? */
+		if (resp->verf->committed != NFS_UNSTABLE) {
+			/* Resend from where the server left off */
+			data->mds_offset += resp->count;
+			argp->offset += resp->count;
+			argp->pgbase += resp->count;
+			argp->count -= resp->count;
+		} else {
+			/* Resend as a stable write in order to avoid
+			 * headaches in the case of a server crash.
+			 */
+			argp->stable = NFS_FILE_SYNC;
 		}
-		/* Can't do anything about it except throw an error. */
-		task->tk_status = -EIO;
+		rpc_restart_call_prepare(task);
 	}
 }
 