Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	213
1 files changed, 122 insertions, 91 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 834f0fe96f89..2c68818f68ac 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -100,7 +100,6 @@ void nfs_writedata_free(struct nfs_write_data *p)
 
 void nfs_writedata_release(struct nfs_write_data *wdata)
 {
-	put_lseg(wdata->lseg);
 	put_nfs_open_context(wdata->args.context);
 	nfs_writedata_free(wdata);
 }
@@ -236,10 +235,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL)
 			break;
-		if (nfs_set_page_tag_locked(req))
+		if (nfs_lock_request_dontget(req))
 			break;
 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
-		 * then the call to nfs_set_page_tag_locked() will always
+		 * then the call to nfs_lock_request_dontget() will always
		 * succeed provided that someone hasn't already marked the
		 * request as dirty (in which case we don't care).
		 */
@@ -375,21 +374,14 @@ out_err:
 /*
  * Insert a write request into an inode
  */
-static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
-	int error;
-
-	error = radix_tree_preload(GFP_NOFS);
-	if (error != 0)
-		goto out;
 
 	/* Lock the request! */
 	nfs_lock_request_dontget(req);
 
 	spin_lock(&inode->i_lock);
-	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
-	BUG_ON(error);
 	if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
 		inode->i_version++;
 	set_bit(PG_MAPPED, &req->wb_flags);
@@ -397,12 +389,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	set_page_private(req->wb_page, (unsigned long)req);
 	nfsi->npages++;
 	kref_get(&req->wb_kref);
-	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
-			NFS_PAGE_TAG_LOCKED);
 	spin_unlock(&inode->i_lock);
-	radix_tree_preload_end();
-out:
-	return error;
 }
 
 /*
@@ -419,7 +406,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	clear_bit(PG_MAPPED, &req->wb_flags);
-	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
 	nfsi->npages--;
 	spin_unlock(&inode->i_lock);
 	nfs_release_request(req);
@@ -432,39 +418,90 @@ nfs_mark_request_dirty(struct nfs_page *req)
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-/*
- * Add a request to the inode's commit list.
+/**
+ * nfs_request_add_commit_list - add request to a commit list
+ * @req: pointer to a struct nfs_page
+ * @head: commit list head
+ *
+ * This sets the PG_CLEAN bit, updates the inode global count of
+ * number of outstanding requests requiring a commit as well as
+ * the MM page stats.
+ *
+ * The caller must _not_ hold the inode->i_lock, but must be
+ * holding the nfs_page lock.
  */
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+void
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
 {
 	struct inode *inode = req->wb_context->dentry->d_inode;
-	struct nfs_inode *nfsi = NFS_I(inode);
 
-	spin_lock(&inode->i_lock);
 	set_bit(PG_CLEAN, &(req)->wb_flags);
-	radix_tree_tag_set(&nfsi->nfs_page_tree,
-			req->wb_index,
-			NFS_PAGE_TAG_COMMIT);
-	nfsi->ncommit++;
+	spin_lock(&inode->i_lock);
+	nfs_list_add_request(req, head);
+	NFS_I(inode)->ncommit++;
 	spin_unlock(&inode->i_lock);
-	pnfs_mark_request_commit(req, lseg);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
+EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
-static int
+/**
+ * nfs_request_remove_commit_list - Remove request from a commit list
+ * @req: pointer to a nfs_page
+ *
+ * This clears the PG_CLEAN bit, and updates the inode global count of
+ * number of outstanding requests requiring a commit
+ * It does not update the MM page stats.
+ *
+ * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ */
+void
+nfs_request_remove_commit_list(struct nfs_page *req)
+{
+	struct inode *inode = req->wb_context->dentry->d_inode;
+
+	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
+		return;
+	nfs_list_remove_request(req);
+	NFS_I(inode)->ncommit--;
+}
+EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
+
+
+/*
+ * Add a request to the inode's commit list.
+ */
+static void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+	struct inode *inode = req->wb_context->dentry->d_inode;
+
+	if (pnfs_mark_request_commit(req, lseg))
+		return;
+	nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+}
+
+static void
+nfs_clear_page_commit(struct page *page)
+{
+	dec_zone_page_state(page, NR_UNSTABLE_NFS);
+	dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+}
+
+static void
 nfs_clear_request_commit(struct nfs_page *req)
 {
-	struct page *page = req->wb_page;
+	if (test_bit(PG_CLEAN, &req->wb_flags)) {
+		struct inode *inode = req->wb_context->dentry->d_inode;
 
-	if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
-		dec_zone_page_state(page, NR_UNSTABLE_NFS);
-		dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
-		return 1;
+		if (!pnfs_clear_request_commit(req)) {
+			spin_lock(&inode->i_lock);
+			nfs_request_remove_commit_list(req);
+			spin_unlock(&inode->i_lock);
+		}
+		nfs_clear_page_commit(req->wb_page);
 	}
-	return 0;
 }
 
 static inline
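The kernel-doc added in this hunk spells out an asymmetric locking contract: nfs_request_add_commit_list() takes inode->i_lock itself, so its caller holds only the nfs_page lock, while nfs_request_remove_commit_list() expects the caller to already hold both i_lock and the nfs_page lock, and leaves the MM page stats to the caller. A minimal sketch of hypothetical callers, modelled on nfs_mark_request_commit() and nfs_clear_request_commit() from this patch; the example_* helpers are illustrative and not part of the patch:

/* Sketch only: assumes the context of fs/nfs/write.c as patched above. */
static void example_mark_for_commit(struct nfs_page *req, struct inode *inode)
{
	/* nfs_page lock already held by the caller; i_lock must NOT be held */
	nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
}

static void example_unmark_for_commit(struct nfs_page *req, struct inode *inode)
{
	spin_lock(&inode->i_lock);		/* removal needs i_lock ...        */
	nfs_request_remove_commit_list(req);	/* ... plus the nfs_page lock      */
	spin_unlock(&inode->i_lock);
	nfs_clear_page_commit(req->wb_page);	/* MM page stats updated separately */
}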
@@ -491,15 +528,14 @@ int nfs_reschedule_unstable_write(struct nfs_page *req,
 	return 0;
 }
 #else
-static inline void
+static void
 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
 {
 }
 
-static inline int
+static void
 nfs_clear_request_commit(struct nfs_page *req)
 {
-	return 0;
 }
 
 static inline
@@ -520,46 +556,65 @@ int nfs_reschedule_unstable_write(struct nfs_page *req,
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
 {
-	return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
+	return nfsi->ncommit > 0;
+}
+
+/* i_lock held by caller */
+static int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
+		spinlock_t *lock)
+{
+	struct nfs_page *req, *tmp;
+	int ret = 0;
+
+	list_for_each_entry_safe(req, tmp, src, wb_list) {
+		if (!nfs_lock_request(req))
+			continue;
+		if (cond_resched_lock(lock))
+			list_safe_reset_next(req, tmp, wb_list);
+		nfs_request_remove_commit_list(req);
+		nfs_list_add_request(req, dst);
+		ret++;
+		if (ret == max)
+			break;
+	}
+	return ret;
 }
 
 /*
  * nfs_scan_commit - Scan an inode for commit requests
  * @inode: NFS inode to scan
  * @dst: destination list
- * @idx_start: lower bound of page->index to scan.
- * @npages: idx_start + npages sets the upper bound to scan.
  *
  * Moves requests from the inode's 'commit' request list.
  * The requests are *not* checked to ensure that they form a contiguous set.
  */
 static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
+nfs_scan_commit(struct inode *inode, struct list_head *dst)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
-	int ret;
-
-	if (!nfs_need_commit(nfsi))
-		return 0;
+	int ret = 0;
 
 	spin_lock(&inode->i_lock);
-	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
-	if (ret > 0)
-		nfsi->ncommit -= ret;
-	spin_unlock(&inode->i_lock);
-
-	if (nfs_need_commit(NFS_I(inode)))
-		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	if (nfsi->ncommit > 0) {
+		const int max = INT_MAX;
 
+		ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
+				&inode->i_lock);
+		ret += pnfs_scan_commit_lists(inode, max - ret,
+				&inode->i_lock);
+	}
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
+
 #else
 static inline int nfs_need_commit(struct nfs_inode *nfsi)
 {
 	return 0;
 }
 
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
+static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
 {
 	return 0;
 }
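The new nfs_scan_commit_list() is an instance of a common pattern: walk a list under a spinlock, let cond_resched_lock() drop and retake the lock when a reschedule is due, and call list_safe_reset_next() afterwards so the saved next pointer is refreshed in case the lock was released. A generic sketch of the same pattern follows; struct example_item and example_drain_list() are illustrative names, not kernel APIs, and the sketch assumes the current entry stays pinned across the lock drop (NFS pins it with nfs_lock_request()):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head link;
	/* payload */
};

/* Move up to 'max' entries from 'src' to 'dst'; 'lock' protects both lists. */
static int example_drain_list(struct list_head *src, struct list_head *dst,
			      int max, spinlock_t *lock)
{
	struct example_item *item, *tmp;
	int moved = 0;

	list_for_each_entry_safe(item, tmp, src, link) {
		if (cond_resched_lock(lock))
			/* lock was dropped and retaken: re-read the next entry */
			list_safe_reset_next(item, tmp, link);
		list_move_tail(&item->link, dst);
		if (++moved == max)
			break;
	}
	return moved;
}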
@@ -604,7 +659,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		    || end < req->wb_offset)
 			goto out_flushme;
 
-		if (nfs_set_page_tag_locked(req))
+		if (nfs_lock_request_dontget(req))
 			break;
 
 		/* The request is locked, so wait and then retry */
@@ -616,13 +671,6 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		spin_lock(&inode->i_lock);
 	}
 
-	if (nfs_clear_request_commit(req) &&
-	    radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
-				 req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL) {
-		NFS_I(inode)->ncommit--;
-		pnfs_clear_request_commit(req);
-	}
-
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
 		req->wb_offset = offset;
@@ -634,6 +682,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		req->wb_bytes = rqend - req->wb_offset;
 out_unlock:
 	spin_unlock(&inode->i_lock);
+	nfs_clear_request_commit(req);
 	return req;
 out_flushme:
 	spin_unlock(&inode->i_lock);
@@ -655,7 +704,6 @@ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
 {
 	struct inode *inode = page->mapping->host;
 	struct nfs_page *req;
-	int error;
 
 	req = nfs_try_to_update_request(inode, page, offset, bytes);
 	if (req != NULL)
@@ -663,11 +711,7 @@ static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
 	req = nfs_create_request(ctx, inode, page, offset, bytes);
 	if (IS_ERR(req))
 		goto out;
-	error = nfs_inode_add_request(inode, req);
-	if (error != 0) {
-		nfs_release_request(req);
-		req = ERR_PTR(error);
-	}
+	nfs_inode_add_request(inode, req);
 out:
 	return req;
 }
@@ -684,7 +728,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 	nfs_grow_file(page, offset, count);
 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
 	nfs_mark_request_dirty(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	return 0;
 }
 
@@ -777,7 +821,7 @@ static void nfs_writepage_release(struct nfs_page *req,
 
 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
 		nfs_inode_remove_request(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	nfs_end_page_writeback(page);
 }
 
@@ -925,7 +969,7 @@ static void nfs_redirty_request(struct nfs_page *req)
 	struct page *page = req->wb_page;
 
 	nfs_mark_request_dirty(req);
-	nfs_clear_page_tag_locked(req);
+	nfs_unlock_request(req);
 	nfs_end_page_writeback(page);
 }
 
@@ -1128,23 +1172,14 @@ out:
 	nfs_writedata_release(calldata);
 }
 
-#if defined(CONFIG_NFS_V4_1)
 void nfs_write_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
-
-	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
-				&data->args.seq_args,
-				&data->res.seq_res, 1, task))
-		return;
-	rpc_call_start(task);
+	NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
 }
-#endif /* CONFIG_NFS_V4_1 */
 
 static const struct rpc_call_ops nfs_write_partial_ops = {
-#if defined(CONFIG_NFS_V4_1)
 	.rpc_call_prepare = nfs_write_prepare,
-#endif /* CONFIG_NFS_V4_1 */
 	.rpc_call_done = nfs_writeback_done_partial,
 	.rpc_release = nfs_writeback_release_partial,
 };
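With the CONFIG_NFS_V4_1 guards gone, nfs_write_prepare() is built unconditionally and simply forwards to the version-specific write_rpc_prepare rpc_op. The following is only a hedged sketch of what such hooks could look like, inferred from the inline code deleted above; the real per-version implementations live elsewhere in the tree and may differ:

/* Hypothetical v2/v3-style hook: no session, just start the RPC call. */
static void example_v3_write_rpc_prepare(struct rpc_task *task,
					 struct nfs_write_data *data)
{
	rpc_call_start(task);
}

/* Hypothetical v4.1-style hook: set up the slot/sequence first, as the
 * removed #ifdef CONFIG_NFS_V4_1 block in nfs_write_prepare() used to do. */
static void example_v41_write_rpc_prepare(struct rpc_task *task,
					  struct nfs_write_data *data)
{
	if (nfs4_setup_sequence(NFS_SERVER(data->inode),
				&data->args.seq_args,
				&data->res.seq_res, 1, task))
		return;
	rpc_call_start(task);
}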
@@ -1199,16 +1234,14 @@ static void nfs_writeback_release_full(void *calldata)
 remove_request:
 		nfs_inode_remove_request(req);
 	next:
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 		nfs_end_page_writeback(page);
 	}
 	nfs_writedata_release(calldata);
 }
 
 static const struct rpc_call_ops nfs_write_full_ops = {
-#if defined(CONFIG_NFS_V4_1)
 	.rpc_call_prepare = nfs_write_prepare,
-#endif /* CONFIG_NFS_V4_1 */
 	.rpc_call_done = nfs_writeback_done_full,
 	.rpc_release = nfs_writeback_release_full,
 };
@@ -1325,7 +1358,6 @@ void nfs_commitdata_release(void *data)
 {
 	struct nfs_write_data *wdata = data;
 
-	put_lseg(wdata->lseg);
 	put_nfs_open_context(wdata->args.context);
 	nfs_commit_free(wdata);
 }
@@ -1411,7 +1443,7 @@ void nfs_retry_commit(struct list_head *page_list,
 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
 			     BDI_RECLAIMABLE);
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1460,7 +1492,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
-		nfs_clear_request_commit(req);
+		nfs_clear_page_commit(req->wb_page);
 
 		dprintk("NFS: commit (%s/%lld %d@%lld)",
 			req->wb_context->dentry->d_sb->s_id,
@@ -1486,7 +1518,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_clear_page_tag_locked(req);
+		nfs_unlock_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
@@ -1501,9 +1533,7 @@ static void nfs_commit_release(void *calldata)
 }
 
 static const struct rpc_call_ops nfs_commit_ops = {
-#if defined(CONFIG_NFS_V4_1)
 	.rpc_call_prepare = nfs_write_prepare,
-#endif /* CONFIG_NFS_V4_1 */
 	.rpc_call_done = nfs_commit_done,
 	.rpc_release = nfs_commit_release,
 };
@@ -1517,7 +1547,7 @@ int nfs_commit_inode(struct inode *inode, int how)
 	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
 	if (res <= 0)
 		goto out_mark_dirty;
-	res = nfs_scan_commit(inode, &head, 0, 0);
+	res = nfs_scan_commit(inode, &head);
 	if (res) {
 		int error;
 
@@ -1635,6 +1665,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 		if (req == NULL)
 			break;
 		if (nfs_lock_request_dontget(req)) {
+			nfs_clear_request_commit(req);
 			nfs_inode_remove_request(req);
 			/*
 			 * In case nfs_inode_remove_request has marked the