Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	149
1 file changed, 77 insertions(+), 72 deletions(-)
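What the hunks below show (reconstructed from the diff itself, not from the original changelog): the private NFS_I(inode)->req_lock is replaced by the generic inode->i_lock; the nfs_page reference count moves from an open-coded atomic (wb_count with atomic_inc) to a struct kref (wb_kref with kref_get); the separate nfsi->commit list is dropped in favour of tagging requests in the per-inode nfs_page_tree with NFS_PAGE_TAG_COMMIT; NFS_PAGE_TAG_WRITEBACK becomes NFS_PAGE_TAG_LOCKED and nfs_clear_page_writeback() becomes nfs_clear_page_tag_locked(); nfs_open_context accesses switch from ->dentry to ->path.dentry; and the nfs_mark_uptodate() call moves from nfs_writepage_setup() to the write and commit completion paths.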
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af344a158e01..73ac992ece85 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -117,19 +117,19 @@ static struct nfs_page *nfs_page_find_request_locked(struct page *page)
 	if (PagePrivate(page)) {
 		req = (struct nfs_page *)page_private(page);
 		if (req != NULL)
-			atomic_inc(&req->wb_count);
+			kref_get(&req->wb_kref);
 	}
 	return req;
 }
 
 static struct nfs_page *nfs_page_find_request(struct page *page)
 {
+	struct inode *inode = page->mapping->host;
 	struct nfs_page *req = NULL;
-	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return req;
 }
 
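Note: the wb_count to wb_kref change in the hunk above (and in nfs_inode_add_request() and nfs_wait_on_requests_locked() further down) replaces an open-coded atomic reference count with the kernel's kref helper, which ties the final reference drop to a release callback. Below is a minimal, self-contained userspace sketch of that pattern, not kernel code: the toy_* names are hypothetical and C11 atomics stand in for the kref API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct kref: an atomic count plus get/put helpers. */
struct toy_kref {
	atomic_int refcount;
};

static void toy_kref_init(struct toy_kref *k)
{
	atomic_init(&k->refcount, 1);
}

static void toy_kref_get(struct toy_kref *k)
{
	atomic_fetch_add(&k->refcount, 1);
}

/* Like kref_put(): run release() only when the last reference is dropped. */
static int toy_kref_put(struct toy_kref *k, void (*release)(struct toy_kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1) {
		release(k);
		return 1;
	}
	return 0;
}

/* A pared-down "request" embedding its reference count, like nfs_page. */
struct toy_request {
	struct toy_kref wb_kref;	/* first member, so the cast below is valid */
	int id;
};

static void toy_request_release(struct toy_kref *k)
{
	struct toy_request *req = (struct toy_request *)k;

	printf("freeing request %d\n", req->id);
	free(req);
}

int main(void)
{
	struct toy_request *req = malloc(sizeof(*req));

	if (req == NULL)
		return 1;
	req->id = 42;
	toy_kref_init(&req->wb_kref);
	toy_kref_get(&req->wb_kref);				/* a second user takes a reference */
	toy_kref_put(&req->wb_kref, toy_request_release);	/* count 2 -> 1, nothing freed */
	toy_kref_put(&req->wb_kref, toy_request_release);	/* count 1 -> 0, release runs */
	return 0;
}

The kernel's kref_put() behaves the same way: it invokes the supplied release() only when the count reaches zero, so no caller frees the object directly.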
@@ -191,8 +191,6 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 	}
 	/* Update file length */
 	nfs_grow_file(page, offset, count);
-	/* Set the PG_uptodate flag? */
-	nfs_mark_uptodate(page, offset, count);
 	nfs_unlock_request(req);
 	return 0;
 }
@@ -253,16 +251,16 @@ static void nfs_end_page_writeback(struct page *page)
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 				struct page *page)
 {
+	struct inode *inode = page->mapping->host;
+	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req;
-	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-	spinlock_t *req_lock = &nfsi->req_lock;
 	int ret;
 
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	for(;;) {
 		req = nfs_page_find_request_locked(page);
 		if (req == NULL) {
-			spin_unlock(req_lock);
+			spin_unlock(&inode->i_lock);
 			return 1;
 		}
 		if (nfs_lock_request_dontget(req))
@@ -272,28 +270,28 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		 * succeed provided that someone hasn't already marked the
 		 * request as dirty (in which case we don't care).
 		 */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret != 0)
 			return ret;
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 	}
 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
 		/* This request is marked for commit */
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_unlock_request(req);
 		nfs_pageio_complete(pgio);
 		return 1;
 	}
 	if (nfs_set_page_writeback(page) != 0) {
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		BUG();
 	}
 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
-			NFS_PAGE_TAG_WRITEBACK);
+			NFS_PAGE_TAG_LOCKED);
 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	nfs_pageio_add_request(pgio, req);
 	return ret;
 }
@@ -400,7 +398,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	if (PageDirty(req->wb_page))
 		set_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfsi->npages++;
-	atomic_inc(&req->wb_count);
+	kref_get(&req->wb_kref);
 	return 0;
 }
 
@@ -409,12 +407,12 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
  */
 static void nfs_inode_remove_request(struct nfs_page *req)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 
 	BUG_ON (!NFS_WBACK_BUSY(req));
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
@@ -422,11 +420,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_end_data_update(inode);
 		iput(inode);
 	} else
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 	nfs_clear_request(req);
 	nfs_release_request(req);
 }
@@ -457,14 +455,16 @@ nfs_dirty_request(struct nfs_page *req)
 static void
 nfs_mark_request_commit(struct nfs_page *req)
 {
-	struct inode *inode = req->wb_context->dentry->d_inode;
+	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	struct nfs_inode *nfsi = NFS_I(inode);
 
-	spin_lock(&nfsi->req_lock);
-	nfs_list_add_request(req, &nfsi->commit);
+	spin_lock(&inode->i_lock);
 	nfsi->ncommit++;
 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
-	spin_unlock(&nfsi->req_lock);
+	radix_tree_tag_set(&nfsi->nfs_page_tree,
+			req->wb_index,
+			NFS_PAGE_TAG_COMMIT);
+	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
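Note: the hunk above removes the nfs_list_add_request(req, &nfsi->commit) call; a request awaiting commit is now marked by tagging its existing slot in the per-inode nfs_page_tree with NFS_PAGE_TAG_COMMIT, and the nfs_scan_commit()/nfs_scan_list() hunks below collect such requests with a tagged lookup instead of walking a second list. The toy program below illustrates only the general "mark by tag, collect by tag lookup" idea; the names are hypothetical and a flat array stands in for the radix tree.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of "find entries by tag": entries live in one indexed
 * structure and a per-entry tag bit marks those awaiting commit.
 */
#define NSLOTS		8
#define TAG_COMMIT	0x1u

struct slot {
	bool used;
	unsigned int tags;
	int data;
};

static struct slot tree[NSLOTS];

static void tag_set(unsigned int index, unsigned int tag)
{
	tree[index].tags |= tag;
}

/* Collect up to max tagged entries, starting at index start. */
static int gang_lookup_tag(unsigned int start, int *out, int max, unsigned int tag)
{
	int found = 0;

	for (unsigned int i = start; i < NSLOTS && found < max; i++)
		if (tree[i].used && (tree[i].tags & tag))
			out[found++] = tree[i].data;
	return found;
}

int main(void)
{
	int results[NSLOTS];
	int n;

	for (int i = 0; i < NSLOTS; i++) {
		tree[i].used = true;
		tree[i].data = 100 + i;
	}
	tag_set(2, TAG_COMMIT);		/* mark two entries as commit-pending */
	tag_set(5, TAG_COMMIT);

	n = gang_lookup_tag(0, results, NSLOTS, TAG_COMMIT);
	for (int i = 0; i < n; i++)
		printf("commit-pending entry: %d\n", results[i]);
	return 0;
}

One visible consequence in this diff is that the old cross-check between nfsi->ncommit and list_empty(&nfsi->commit) in nfs_scan_commit() disappears, since there is no longer a second structure that could fall out of sync.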
@@ -526,18 +526,18 @@ static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, u
 		idx_end = idx_start + npages - 1;
 
 	next = idx_start;
-	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
+	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
 		if (req->wb_index > idx_end)
 			break;
 
 		next = req->wb_index + 1;
 		BUG_ON(!NFS_WBACK_BUSY(req));
 
-		atomic_inc(&req->wb_count);
-		spin_unlock(&nfsi->req_lock);
+		kref_get(&req->wb_kref);
+		spin_unlock(&inode->i_lock);
 		error = nfs_wait_on_request(req);
 		nfs_release_request(req);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
 		if (error < 0)
 			return error;
 		res++;
@@ -577,10 +577,9 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
 	int res = 0;
 
 	if (nfsi->ncommit != 0) {
-		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
+		res = nfs_scan_list(nfsi, dst, idx_start, npages,
+				NFS_PAGE_TAG_COMMIT);
 		nfsi->ncommit -= res;
-		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
-			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
 	}
 	return res;
 }
@@ -603,7 +602,6 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req, *new = NULL;
 	pgoff_t rqend, end;
 
@@ -613,13 +611,13 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 	/* Loop over all inode entries and see if we find
 	 * A request for the page we wish to update
 	 */
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req) {
 		if (!nfs_lock_request_dontget(req)) {
 			int error;
 
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			error = nfs_wait_on_request(req);
 			nfs_release_request(req);
 			if (error < 0) {
@@ -629,7 +627,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 			}
 			continue;
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		if (new)
 			nfs_release_request(new);
 		break;
@@ -640,14 +638,14 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
 		nfs_lock_request_dontget(new);
 		error = nfs_inode_add_request(inode, new);
 		if (error) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_unlock_request(new);
 			return ERR_PTR(error);
 		}
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		return new;
 	}
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 
 	new = nfs_create_request(ctx, inode, page, offset, bytes);
 	if (IS_ERR(new))
@@ -751,12 +749,17 @@ int nfs_updatepage(struct file *file, struct page *page,
 static void nfs_writepage_release(struct nfs_page *req)
 {
 
-	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
+	if (PageError(req->wb_page)) {
+		nfs_end_page_writeback(req->wb_page);
+		nfs_inode_remove_request(req);
+	} else if (!nfs_reschedule_unstable_write(req)) {
+		/* Set the PG_uptodate flag */
+		nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
 		nfs_end_page_writeback(req->wb_page);
 		nfs_inode_remove_request(req);
 	} else
 		nfs_end_page_writeback(req->wb_page);
-	nfs_clear_page_writeback(req);
+	nfs_clear_page_tag_locked(req);
 }
 
 static inline int flush_task_priority(int how)
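Note: the nfs_writepage_release() hunk above splits the old combined test (PageError || !nfs_reschedule_unstable_write) into separate branches so that nfs_mark_uptodate() is called only when the write completed without a page error. A standalone sketch of just that control flow, with the NFS helpers replaced by hypothetical stubs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stubs standing in for the NFS helpers used by the real code. */
static bool page_error(void)			{ return false; }
static bool reschedule_unstable_write(void)	{ return false; }
static void mark_uptodate(void)			{ printf("mark page uptodate\n"); }
static void end_page_writeback(void)		{ printf("end page writeback\n"); }
static void remove_request(void)		{ printf("remove request\n"); }
static void clear_tag_locked(void)		{ printf("unlock request\n"); }

/*
 * Shape of the new nfs_writepage_release() logic: the error path and the
 * completed-write path both end writeback and drop the request, but only
 * the completed-write path marks the page up to date.
 */
static void writepage_release(void)
{
	if (page_error()) {
		end_page_writeback();
		remove_request();
	} else if (!reschedule_unstable_write()) {
		mark_uptodate();
		end_page_writeback();
		remove_request();
	} else
		end_page_writeback();
	clear_tag_locked();
}

int main(void)
{
	writepage_release();
	return 0;
}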
@@ -786,7 +789,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 	 * NB: take care not to mess about with data->commit et al. */
 
 	data->req = req;
-	data->inode = inode = req->wb_context->dentry->d_inode;
+	data->inode = inode = req->wb_context->path.dentry->d_inode;
 	data->cred = req->wb_context->cred;
 
 	data->args.fh = NFS_FH(inode);
@@ -885,7 +888,7 @@ out_bad:
 	}
 	nfs_redirty_request(req);
 	nfs_end_page_writeback(req->wb_page);
-	nfs_clear_page_writeback(req);
+	nfs_clear_page_tag_locked(req);
 	return -ENOMEM;
 }
 
@@ -928,7 +931,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
 		nfs_list_remove_request(req);
 		nfs_redirty_request(req);
 		nfs_end_page_writeback(req->wb_page);
-		nfs_clear_page_writeback(req);
+		nfs_clear_page_tag_locked(req);
 	}
 	return -ENOMEM;
 }
@@ -954,8 +957,8 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 	struct page *page = req->wb_page;
 
 	dprintk("NFS: write (%s/%Ld %d@%Ld)",
-		req->wb_context->dentry->d_inode->i_sb->s_id,
-		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+		req->wb_context->path.dentry->d_inode->i_sb->s_id,
+		(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
 		req->wb_bytes,
 		(long long)req_offset(req));
 
@@ -970,9 +973,9 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 	}
 
 	if (nfs_write_need_commit(data)) {
-		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
+		struct inode *inode = page->mapping->host;
 
-		spin_lock(req_lock);
+		spin_lock(&inode->i_lock);
 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
 			/* Do nothing we need to resend the writes */
 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
@@ -983,7 +986,7 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
 			dprintk(" server reboot detected\n");
 		}
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 	} else
 		dprintk(" OK\n");
 
@@ -1020,8 +1023,8 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
 		page = req->wb_page;
 
 		dprintk("NFS: write (%s/%Ld %d@%Ld)",
-			req->wb_context->dentry->d_inode->i_sb->s_id,
-			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			req->wb_context->path.dentry->d_inode->i_sb->s_id,
+			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
 			req->wb_bytes,
 			(long long)req_offset(req));
 
@@ -1039,12 +1042,14 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
 			dprintk(" marked for commit\n");
 			goto next;
 		}
+		/* Set the PG_uptodate flag? */
+		nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
 		dprintk(" OK\n");
 remove_request:
 		nfs_end_page_writeback(page);
 		nfs_inode_remove_request(req);
 	next:
-		nfs_clear_page_writeback(req);
+		nfs_clear_page_tag_locked(req);
 	}
 }
 
@@ -1157,7 +1162,7 @@ static void nfs_commit_rpcsetup(struct list_head *head,
 
 	list_splice_init(head, &data->pages);
 	first = nfs_list_entry(data->pages.next);
-	inode = first->wb_context->dentry->d_inode;
+	inode = first->wb_context->path.dentry->d_inode;
 
 	data->inode = inode;
 	data->cred = first->wb_context->cred;
@@ -1207,7 +1212,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		nfs_clear_page_writeback(req);
+		nfs_clear_page_tag_locked(req);
 	}
 	return -ENOMEM;
 }
@@ -1234,8 +1239,8 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 
 		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
-			req->wb_context->dentry->d_inode->i_sb->s_id,
-			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			req->wb_context->path.dentry->d_inode->i_sb->s_id,
+			(long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
 			req->wb_bytes,
 			(long long)req_offset(req));
 		if (task->tk_status < 0) {
@@ -1249,6 +1254,9 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 		 * returned by the server against all stored verfs. */
 		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
 			/* We have a match */
+			/* Set the PG_uptodate flag */
+			nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
+					req->wb_bytes);
 			nfs_inode_remove_request(req);
 			dprintk(" OK\n");
 			goto next;
@@ -1257,7 +1265,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 		dprintk(" mismatch\n");
 		nfs_redirty_request(req);
 	next:
-		nfs_clear_page_writeback(req);
+		nfs_clear_page_tag_locked(req);
 	}
 }
 
@@ -1268,13 +1276,12 @@ static const struct rpc_call_ops nfs_commit_ops = {
 
 int nfs_commit_inode(struct inode *inode, int how)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
 	LIST_HEAD(head);
 	int res;
 
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	res = nfs_scan_commit(inode, &head, 0, 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	if (res) {
 		int error = nfs_commit_list(inode, &head, how);
 		if (error < 0)
@@ -1292,7 +1299,6 @@ static inline int nfs_commit_list(struct inode *inode, struct list_head *head, i
 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
 	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
 	pgoff_t idx_start, idx_end;
 	unsigned int npages = 0;
 	LIST_HEAD(head);
@@ -1314,7 +1320,7 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		}
 	}
 	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&nfsi->req_lock);
+	spin_lock(&inode->i_lock);
 	do {
 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
 		if (ret != 0)
@@ -1325,18 +1331,19 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr
 		if (pages == 0)
 			break;
 		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&nfsi->req_lock);
+			spin_unlock(&inode->i_lock);
 			nfs_cancel_commit_list(&head);
 			ret = pages;
-			spin_lock(&nfsi->req_lock);
+			spin_lock(&inode->i_lock);
 			continue;
 		}
 		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&nfsi->req_lock);
+		spin_unlock(&inode->i_lock);
 		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&nfsi->req_lock);
+		spin_lock(&inode->i_lock);
+
 	} while (ret >= 0);
-	spin_unlock(&nfsi->req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 }
 
@@ -1430,7 +1437,6 @@ int nfs_set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct inode *inode;
-	spinlock_t *req_lock;
 	struct nfs_page *req;
 	int ret;
 
@@ -1439,18 +1445,17 @@ int nfs_set_page_dirty(struct page *page)
 	inode = mapping->host;
 	if (!inode)
 		goto out_raced;
-	req_lock = &NFS_I(inode)->req_lock;
-	spin_lock(req_lock);
+	spin_lock(&inode->i_lock);
 	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
-		spin_unlock(req_lock);
+		spin_unlock(&inode->i_lock);
 		nfs_release_request(req);
 		return ret;
 	}
 	ret = __set_page_dirty_nobuffers(page);
-	spin_unlock(req_lock);
+	spin_unlock(&inode->i_lock);
 	return ret;
 out_raced:
 	return !TestSetPageDirty(page);