author     Daniel Vetter <daniel.vetter@ffwll.ch>   2014-09-11 08:46:53 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2014-09-11 08:46:53 -0400
commit     336879b1da97fffc097f77c6d6f818660f2826f0
tree       4ddb4d1c5d2b67fb096c72e41d2a03b01a605041 /fs/nfs/write.c
parent     3d3cbd84300e7be1e53083cac0f6f9c12978ecb4
parent     fdcaa1dbb7c6ed419b10fb8cdb5001ab0a00538f
Merge remote-tracking branch 'airlied/drm-next' into topic/vblank-rework
Dave asked me to do the backmerge before sending him the revised pull
request, so here we go. Nothing fancy in the conflicts, just a few
things changed right next to each other.
Conflicts:
drivers/gpu/drm/drm_irq.c
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--  fs/nfs/write.c  167
1 file changed, 94 insertions(+), 73 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5e2f10304548..175d5d073ccf 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -47,6 +47,8 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
 static void nfs_clear_request_commit(struct nfs_page *req);
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+                                      struct inode *inode);
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
@@ -71,18 +73,18 @@ void nfs_commit_free(struct nfs_commit_data *p)
 }
 EXPORT_SYMBOL_GPL(nfs_commit_free);
 
-static struct nfs_rw_header *nfs_writehdr_alloc(void)
+static struct nfs_pgio_header *nfs_writehdr_alloc(void)
 {
-        struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
+        struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
 
         if (p)
                 memset(p, 0, sizeof(*p));
         return p;
 }
 
-static void nfs_writehdr_free(struct nfs_rw_header *whdr)
+static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
 {
-        mempool_free(whdr, nfs_wdata_mempool);
+        mempool_free(hdr, nfs_wdata_mempool);
 }
 
 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -93,6 +95,38 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
 }
 
 /*
+ * nfs_page_search_commits_for_head_request_locked
+ *
+ * Search through commit lists on @inode for the head request for @page.
+ * Must be called while holding the inode (which is cinfo) lock.
+ *
+ * Returns the head request if found, or NULL if not found.
+ */
+static struct nfs_page *
+nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
+                                                struct page *page)
+{
+        struct nfs_page *freq, *t;
+        struct nfs_commit_info cinfo;
+        struct inode *inode = &nfsi->vfs_inode;
+
+        nfs_init_cinfo_from_inode(&cinfo, inode);
+
+        /* search through pnfs commit lists */
+        freq = pnfs_search_commit_reqs(inode, &cinfo, page);
+        if (freq)
+                return freq->wb_head;
+
+        /* Linearly search the commit list for the correct request */
+        list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
+                if (freq->wb_page == page)
+                        return freq->wb_head;
+        }
+
+        return NULL;
+}
+
+/*
  * nfs_page_find_head_request_locked - find head request associated with @page
  *
  * must be called while holding the inode lock.
@@ -106,21 +140,12 @@ nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
 
         if (PagePrivate(page))
                 req = (struct nfs_page *)page_private(page);
-        else if (unlikely(PageSwapCache(page))) {
-                struct nfs_page *freq, *t;
-
-                /* Linearly search the commit list for the correct req */
-                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
-                        if (freq->wb_page == page) {
-                                req = freq->wb_head;
-                                break;
-                        }
-                }
-        }
+        else if (unlikely(PageSwapCache(page)))
+                req = nfs_page_search_commits_for_head_request_locked(nfsi,
+                        page);
 
         if (req) {
                 WARN_ON_ONCE(req->wb_head != req);
-
                 kref_get(&req->wb_kref);
         }
 
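The hunk above replaces the open-coded scan with a call to the new helper, which first consults the pNFS commit lists and then falls back to a linear scan of the MDS commit list, returning the matching request's page-group head. As a rough userspace illustration of that fallback search only (not kernel code; the struct and field names below are invented for the example):

#include <stdio.h>
#include <stddef.h>

struct demo_req {
    void *page;              /* page this request covers */
    struct demo_req *head;   /* head request of the page group */
    struct demo_req *next;   /* next entry on the commit list */
};

/* Walk the list and return the group head of the entry covering @page. */
static struct demo_req *find_head_for_page(struct demo_req *commit_list,
                                           void *page)
{
    for (struct demo_req *r = commit_list; r != NULL; r = r->next) {
        if (r->page == page)
            return r->head;  /* caller wants the head, not the subrequest */
    }
    return NULL;             /* page has no entry on the commit list */
}

int main(void)
{
    char pg_a, pg_b;
    struct demo_req head_a = { &pg_a, &head_a, NULL };
    struct demo_req sub_b  = { &pg_b, &head_a, &head_a };

    printf("head for pg_b: %p (expect %p)\n",
           (void *)find_head_for_page(&sub_b, &pg_b), (void *)&head_a);
    return 0;
}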
@@ -216,7 +241,7 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
         unsigned int pos = 0;
         unsigned int len = nfs_page_length(req->wb_page);
 
-        nfs_page_group_lock(req);
+        nfs_page_group_lock(req, false);
 
         do {
                 tmp = nfs_page_group_search_locked(req->wb_head, pos);
@@ -379,8 +404,6 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
                 subreq->wb_head = subreq;
                 subreq->wb_this_page = subreq;
 
-                nfs_clear_request_commit(subreq);
-
                 /* subreq is now totally disconnected from page group or any
                  * write / commit lists. last chance to wake any waiters */
                 nfs_unlock_request(subreq);
@@ -455,8 +478,23 @@ try_again:
                 return NULL;
         }
 
+        /* holding inode lock, so always make a non-blocking call to try the
+         * page group lock */
+        ret = nfs_page_group_lock(head, true);
+        if (ret < 0) {
+                spin_unlock(&inode->i_lock);
+
+                if (!nonblock && ret == -EAGAIN) {
+                        nfs_page_group_lock_wait(head);
+                        nfs_release_request(head);
+                        goto try_again;
+                }
+
+                nfs_release_request(head);
+                return ERR_PTR(ret);
+        }
+
         /* lock each request in the page group */
-        nfs_page_group_lock(head);
         subreq = head;
         do {
                 /*
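The new block above takes the page group lock without sleeping while the inode spinlock is held: it tries the lock, and on failure drops the spinlock, waits for the group lock to become free, and restarts at try_again. A minimal userspace analogy of that try/drop/wait/retry pattern, using pthreads rather than the kernel primitives (names invented, not part of the patch):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* stands in for inode->i_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* stands in for the page group lock */

/* Acquire both locks without blocking while the outer lock is held. */
static void lock_outer_then_inner(void)
{
retry:
    pthread_mutex_lock(&outer);

    /* non-blocking attempt, as with nfs_page_group_lock(head, true) */
    if (pthread_mutex_trylock(&inner) == EBUSY) {
        pthread_mutex_unlock(&outer);

        /* wait until the inner lock is free, then start over,
         * mirroring nfs_page_group_lock_wait() + goto try_again */
        pthread_mutex_lock(&inner);
        pthread_mutex_unlock(&inner);
        goto retry;
    }
    /* both locks held here */
}

static void unlock_both(void)
{
    pthread_mutex_unlock(&inner);
    pthread_mutex_unlock(&outer);
}

int main(void)
{
    lock_outer_then_inner();
    unlock_both();
    return 0;
}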
@@ -488,7 +526,7 @@ try_again:
          * Commit list removal accounting is done after locks are dropped */
         subreq = head;
         do {
-                nfs_list_remove_request(subreq);
+                nfs_clear_request_commit(subreq);
                 subreq = subreq->wb_this_page;
         } while (subreq != head);
 
@@ -518,15 +556,11 @@ try_again:
 
         nfs_page_group_unlock(head);
 
-        /* drop lock to clear_request_commit the head req and clean up
-         * requests on destroy list */
+        /* drop lock to clean uprequests on destroy list */
         spin_unlock(&inode->i_lock);
 
         nfs_destroy_unlinked_subrequests(destroy_list, head);
 
-        /* clean up commit list state */
-        nfs_clear_request_commit(head);
-
         /* still holds ref on head from nfs_page_find_head_request_locked
          * and still has lock on head from lock loop */
         return head;
@@ -623,7 +657,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
         int err;
 
         /* Stop dirtying of new pages while we sync */
-        err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
+        err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
                         nfs_wait_bit_killable, TASK_KILLABLE);
         if (err)
                 goto out_err;
@@ -705,6 +739,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 
         if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
                 nfs_release_request(req);
+        else
+                WARN_ON_ONCE(1);
 }
 
 static void
@@ -808,6 +844,7 @@ nfs_clear_page_commit(struct page *page)
         dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
 }
 
+/* Called holding inode (/cinfo) lock */
 static void
 nfs_clear_request_commit(struct nfs_page *req)
 {
@@ -817,20 +854,17 @@ nfs_clear_request_commit(struct nfs_page *req)
 
                 nfs_init_cinfo_from_inode(&cinfo, inode);
                 if (!pnfs_clear_request_commit(req, &cinfo)) {
-                        spin_lock(cinfo.lock);
                         nfs_request_remove_commit_list(req, &cinfo);
-                        spin_unlock(cinfo.lock);
                 }
                 nfs_clear_page_commit(req->wb_page);
         }
 }
 
-static inline
-int nfs_write_need_commit(struct nfs_pgio_data *data)
+int nfs_write_need_commit(struct nfs_pgio_header *hdr)
 {
-        if (data->verf.committed == NFS_DATA_SYNC)
-                return data->header->lseg == NULL;
-        return data->verf.committed != NFS_FILE_SYNC;
+        if (hdr->verf.committed == NFS_DATA_SYNC)
+                return hdr->lseg == NULL;
+        return hdr->verf.committed != NFS_FILE_SYNC;
 }
 
 #else
@@ -856,8 +890,7 @@ nfs_clear_request_commit(struct nfs_page *req)
 {
 }
 
-static inline
-int nfs_write_need_commit(struct nfs_pgio_data *data)
+int nfs_write_need_commit(struct nfs_pgio_header *hdr)
 {
         return 0;
 }
@@ -883,11 +916,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
                         nfs_context_set_write_error(req->wb_context, hdr->error);
                         goto remove_req;
                 }
-                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
-                        nfs_mark_request_dirty(req);
-                        goto next;
-                }
-                if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+                if (nfs_write_need_commit(hdr)) {
                         memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
                         nfs_mark_request_commit(req, hdr->lseg, &cinfo);
                         goto next;
@@ -1038,9 +1067,9 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
         else
                 req->wb_bytes = rqend - req->wb_offset;
 out_unlock:
-        spin_unlock(&inode->i_lock);
         if (req)
                 nfs_clear_request_commit(req);
+        spin_unlock(&inode->i_lock);
         return req;
 out_flushme:
         spin_unlock(&inode->i_lock);
@@ -1241,17 +1270,18 @@ static int flush_task_priority(int how)
         return RPC_PRIORITY_NORMAL;
 }
 
-static void nfs_initiate_write(struct nfs_pgio_data *data, struct rpc_message *msg,
+static void nfs_initiate_write(struct nfs_pgio_header *hdr,
+                               struct rpc_message *msg,
                                struct rpc_task_setup *task_setup_data, int how)
 {
-        struct inode *inode = data->header->inode;
+        struct inode *inode = hdr->inode;
         int priority = flush_task_priority(how);
 
         task_setup_data->priority = priority;
-        NFS_PROTO(inode)->write_setup(data, msg);
+        NFS_PROTO(inode)->write_setup(hdr, msg);
 
         nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
-                                 &task_setup_data->rpc_client, msg, data);
+                                 &task_setup_data->rpc_client, msg, hdr);
 }
 
 /* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -1313,21 +1343,9 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
         NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
 }
 
-static void nfs_writeback_release_common(struct nfs_pgio_data *data)
+static void nfs_writeback_release_common(struct nfs_pgio_header *hdr)
 {
-        struct nfs_pgio_header *hdr = data->header;
-        int status = data->task.tk_status;
-
-        if ((status >= 0) && nfs_write_need_commit(data)) {
-                spin_lock(&hdr->lock);
-                if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
-                        ; /* Do nothing */
-                else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
-                        memcpy(&hdr->verf, &data->verf, sizeof(hdr->verf));
-                else if (memcmp(&hdr->verf, &data->verf, sizeof(hdr->verf)))
-                        set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
-                spin_unlock(&hdr->lock);
-        }
+        /* do nothing! */
 }
 
 /*
@@ -1358,7 +1376,8 @@ static int nfs_should_remove_suid(const struct inode *inode)
 /*
  * This function is called when the WRITE call is complete.
  */
-static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data,
+static int nfs_writeback_done(struct rpc_task *task,
+                              struct nfs_pgio_header *hdr,
                               struct inode *inode)
 {
         int status;
@@ -1370,13 +1389,14 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data,
          * another writer had changed the file, but some applications
          * depend on tighter cache coherency when writing.
          */
-        status = NFS_PROTO(inode)->write_done(task, data);
+        status = NFS_PROTO(inode)->write_done(task, hdr);
         if (status != 0)
                 return status;
-        nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, data->res.count);
+        nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
 
 #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
-        if (data->res.verf->committed < data->args.stable && task->tk_status >= 0) {
+        if (hdr->res.verf->committed < hdr->args.stable &&
+            task->tk_status >= 0) {
                 /* We tried a write call, but the server did not
                  * commit data to stable storage even though we
                  * requested it.
@@ -1392,7 +1412,7 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data,
                         dprintk("NFS: faulty NFS server %s:"
                                 " (committed = %d) != (stable = %d)\n",
                                 NFS_SERVER(inode)->nfs_client->cl_hostname,
-                                data->res.verf->committed, data->args.stable);
+                                hdr->res.verf->committed, hdr->args.stable);
                         complain = jiffies + 300 * HZ;
                 }
         }
@@ -1407,16 +1427,17 @@ static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data,
 /*
  * This function is called when the WRITE call is complete.
  */
-static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *data)
+static void nfs_writeback_result(struct rpc_task *task,
+                                 struct nfs_pgio_header *hdr)
 {
-        struct nfs_pgio_args *argp = &data->args;
-        struct nfs_pgio_res *resp = &data->res;
+        struct nfs_pgio_args *argp = &hdr->args;
+        struct nfs_pgio_res *resp = &hdr->res;
 
         if (resp->count < argp->count) {
                 static unsigned long complain;
 
                 /* This a short write! */
-                nfs_inc_stats(data->header->inode, NFSIOS_SHORTWRITE);
+                nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
 
                 /* Has the server at least made some progress? */
                 if (resp->count == 0) {
@@ -1426,14 +1447,14 @@ static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *data)
                                 argp->count);
                         complain = jiffies + 300 * HZ;
                 }
-                nfs_set_pgio_error(data->header, -EIO, argp->offset);
+                nfs_set_pgio_error(hdr, -EIO, argp->offset);
                 task->tk_status = -EIO;
                 return;
         }
         /* Was this an NFSv2 write or an NFSv3 stable write? */
         if (resp->verf->committed != NFS_UNSTABLE) {
                 /* Resend from where the server left off */
-                data->mds_offset += resp->count;
+                hdr->mds_offset += resp->count;
                 argp->offset += resp->count;
                 argp->pgbase += resp->count;
                 argp->count -= resp->count;
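When the server acknowledges fewer bytes than were sent, the code above advances the file offset and page base by the acknowledged count and shrinks the remaining count before resending. A small standalone sketch of that bookkeeping (the struct and function names are hypothetical, not kernel code):

#include <stdio.h>

struct demo_write_args {
    unsigned long long offset; /* file offset of the next attempt */
    unsigned int pgbase;       /* offset into the first page */
    unsigned int count;        /* bytes still to write */
};

/* Advance the request past the bytes the server already accepted. */
static void advance_after_short_write(struct demo_write_args *argp,
                                      unsigned int written)
{
    argp->offset += written;
    argp->pgbase += written;
    argp->count  -= written;
}

int main(void)
{
    struct demo_write_args args = { 4096, 0, 8192 };

    /* server acknowledged only 1024 of the 8192 bytes */
    advance_after_short_write(&args, 1024);
    printf("resend: offset=%llu pgbase=%u count=%u\n",
           args.offset, args.pgbase, args.count);
    /* prints: resend: offset=5120 pgbase=1024 count=7168 */
    return 0;
}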
@@ -1703,7 +1724,7 @@ int nfs_commit_inode(struct inode *inode, int how)
                 return error;
         if (!may_wait)
                 goto out_mark_dirty;
-        error = wait_on_bit(&NFS_I(inode)->flags,
+        error = wait_on_bit_action(&NFS_I(inode)->flags,
                         NFS_INO_COMMIT,
                         nfs_wait_bit_killable,
                         TASK_KILLABLE);
@@ -1884,7 +1905,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 int __init nfs_init_writepagecache(void)
 {
         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
-                                             sizeof(struct nfs_rw_header),
+                                             sizeof(struct nfs_pgio_header),
                                              0, SLAB_HWCACHE_ALIGN,
                                              NULL);
         if (nfs_wdata_cachep == NULL)