Diffstat (limited to 'fs/nfs/write.c')
 -rw-r--r--  fs/nfs/write.c | 297
 1 file changed, 107 insertions(+), 190 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..de38d63aa920 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -201,6 +201,7 @@ static int nfs_set_page_writeback(struct page *page)
 		struct inode *inode = page->mapping->host;
 		struct nfs_server *nfss = NFS_SERVER(inode);
 
+		page_cache_get(page);
 		if (atomic_long_inc_return(&nfss->writeback) >
 				NFS_CONGESTION_ON_THRESH) {
 			set_bdi_congested(&nfss->backing_dev_info,
@@ -216,6 +217,7 @@ static void nfs_end_page_writeback(struct page *page)
 	struct nfs_server *nfss = NFS_SERVER(inode);
 
 	end_page_writeback(page);
+	page_cache_release(page);
 	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
@@ -421,6 +423,7 @@ static void
 nfs_mark_request_dirty(struct nfs_page *req)
 {
 	__set_page_dirty_nobuffers(req->wb_page);
+	__mark_inode_dirty(req->wb_page->mapping->host, I_DIRTY_DATASYNC);
 }
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -438,6 +441,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
+	nfsi->ncommit++;
 	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
@@ -501,57 +505,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 }
 #endif
 
-/*
- * Wait for a request to complete.
- *
- * Interruptible by fatal signals only.
- */
-static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *req;
-	pgoff_t idx_end, next;
-	unsigned int res = 0;
-	int error;
-
-	if (npages == 0)
-		idx_end = ~0;
-	else
-		idx_end = idx_start + npages - 1;
-
-	next = idx_start;
-	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
-		if (req->wb_index > idx_end)
-			break;
-
-		next = req->wb_index + 1;
-		BUG_ON(!NFS_WBACK_BUSY(req));
-
-		kref_get(&req->wb_kref);
-		spin_unlock(&inode->i_lock);
-		error = nfs_wait_on_request(req);
-		nfs_release_request(req);
-		spin_lock(&inode->i_lock);
-		if (error < 0)
-			return error;
-		res++;
-	}
-	return res;
-}
-
-static void nfs_cancel_commit_list(struct list_head *head)
-{
-	struct nfs_page *req;
-
-	while(!list_empty(head)) {
-		req = nfs_list_entry(head->next);
-		nfs_list_remove_request(req);
-		nfs_clear_request_commit(req);
-		nfs_inode_remove_request(req);
-		nfs_unlock_request(req);
-	}
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
@@ -573,11 +526,17 @@ static int
 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int ret;
 
 	if (!nfs_need_commit(nfsi))
 		return 0;
 
-	return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	if (ret > 0)
+		nfsi->ncommit -= ret;
+	if (nfs_need_commit(NFS_I(inode)))
+		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	return ret;
 }
 #else
 static inline int nfs_need_commit(struct nfs_inode *nfsi)
@@ -642,9 +601,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		spin_lock(&inode->i_lock);
 	}
 
-	if (nfs_clear_request_commit(req))
-		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
-				req->wb_index, NFS_PAGE_TAG_COMMIT);
+	if (nfs_clear_request_commit(req) &&
+			radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+				req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+		NFS_I(inode)->ncommit--;
 
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
@@ -703,9 +663,11 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
 	req = nfs_setup_write_request(ctx, page, offset, count);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
+	nfs_mark_request_dirty(req);
 	/* Update file length */
 	nfs_grow_file(page, offset, count);
 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+	nfs_mark_request_dirty(req);
 	nfs_clear_page_tag_locked(req);
 	return 0;
 }
@@ -782,8 +744,6 @@ int nfs_updatepage(struct file *file, struct page *page,
 	status = nfs_writepage_setup(ctx, page, offset, count);
 	if (status < 0)
 		nfs_set_pageerror(page);
-	else
-		__set_page_dirty_nobuffers(page);
 
 	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
 			status, (long long)i_size_read(inode));
@@ -792,13 +752,12 @@ int nfs_updatepage(struct file *file, struct page *page,
 
 static void nfs_writepage_release(struct nfs_page *req)
 {
+	struct page *page = req->wb_page;
 
-	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
-		nfs_end_page_writeback(req->wb_page);
+	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req))
 		nfs_inode_remove_request(req);
-	} else
-		nfs_end_page_writeback(req->wb_page);
 	nfs_clear_page_tag_locked(req);
+	nfs_end_page_writeback(page);
 }
 
 static int flush_task_priority(int how)
@@ -822,7 +781,6 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 		int how)
 {
 	struct inode *inode = req->wb_context->path.dentry->d_inode;
-	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 	int priority = flush_task_priority(how);
 	struct rpc_task *task;
 	struct rpc_message msg = {
@@ -837,9 +795,10 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 		.callback_ops = call_ops,
 		.callback_data = data,
 		.workqueue = nfsiod_workqueue,
-		.flags = flags,
+		.flags = RPC_TASK_ASYNC,
 		.priority = priority,
 	};
+	int ret = 0;
 
 	/* Set up the RPC argument and reply structs
 	 * NB: take care not to mess about with data->commit et al. */
@@ -878,10 +837,18 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
 			(unsigned long long)data->args.offset);
 
 	task = rpc_run_task(&task_setup_data);
-	if (IS_ERR(task))
-		return PTR_ERR(task);
+	if (IS_ERR(task)) {
+		ret = PTR_ERR(task);
+		goto out;
+	}
+	if (how & FLUSH_SYNC) {
+		ret = rpc_wait_for_completion_task(task);
+		if (ret == 0)
+			ret = task->tk_status;
+	}
 	rpc_put_task(task);
-	return 0;
+out:
+	return ret;
 }
 
 /* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -890,9 +857,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
+	struct page *page = req->wb_page;
+
 	nfs_mark_request_dirty(req);
-	nfs_end_page_writeback(req->wb_page);
 	nfs_clear_page_tag_locked(req);
+	nfs_end_page_writeback(page);
 }
 
 /*
@@ -1127,16 +1096,15 @@ static void nfs_writeback_release_full(void *calldata)
 		if (nfs_write_need_commit(data)) {
 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
 			nfs_mark_request_commit(req);
-			nfs_end_page_writeback(page);
 			dprintk(" marked for commit\n");
 			goto next;
 		}
 		dprintk(" OK\n");
 remove_request:
-		nfs_end_page_writeback(page);
 		nfs_inode_remove_request(req);
 	next:
 		nfs_clear_page_tag_locked(req);
+		nfs_end_page_writeback(page);
 	}
 	nfs_writedata_release(calldata);
 }
@@ -1233,7 +1201,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 
 
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-void nfs_commitdata_release(void *data)
+static void nfs_commitdata_release(void *data)
 {
 	struct nfs_write_data *wdata = data;
 
@@ -1250,7 +1218,6 @@ static int nfs_commit_rpcsetup(struct list_head *head,
 {
 	struct nfs_page *first = nfs_list_entry(head->next);
 	struct inode *inode = first->wb_context->path.dentry->d_inode;
-	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 	int priority = flush_task_priority(how);
 	struct rpc_task *task;
 	struct rpc_message msg = {
@@ -1265,7 +1232,7 @@ static int nfs_commit_rpcsetup(struct list_head *head,
 		.callback_ops = &nfs_commit_ops,
 		.callback_data = data,
 		.workqueue = nfsiod_workqueue,
-		.flags = flags,
+		.flags = RPC_TASK_ASYNC,
 		.priority = priority,
 	};
 
@@ -1295,6 +1262,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
+	if (how & FLUSH_SYNC)
+		rpc_wait_for_completion_task(task);
 	rpc_put_task(task);
 	return 0;
 }
@@ -1391,7 +1360,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
 	.rpc_release = nfs_commit_release,
 };
 
-int nfs_commit_inode(struct inode *inode, int how)
+static int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int res;
@@ -1406,92 +1375,51 @@ int nfs_commit_inode(struct inode *inode, int how)
 	}
 	return res;
 }
-#else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
-{
-	return 0;
-}
-#endif
 
-long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct inode *inode = mapping->host;
-	pgoff_t idx_start, idx_end;
-	unsigned int npages = 0;
-	LIST_HEAD(head);
-	int nocommit = how & FLUSH_NOCOMMIT;
-	long pages, ret;
-
-	/* FIXME */
-	if (wbc->range_cyclic)
-		idx_start = 0;
-	else {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-		if (idx_end > idx_start) {
-			pgoff_t l_npages = 1 + idx_end - idx_start;
-			npages = l_npages;
-			if (sizeof(npages) != sizeof(l_npages) &&
-					(pgoff_t)npages != l_npages)
-				npages = 0;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int flags = FLUSH_SYNC;
+	int ret = 0;
+
+	/* Don't commit yet if this is a non-blocking flush and there are
+	 * lots of outstanding writes for this mapping.
+	 */
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+	    nfsi->ncommit <= (nfsi->npages >> 1))
+		goto out_mark_dirty;
+
+	if (wbc->nonblocking || wbc->for_background)
+		flags = 0;
+	ret = nfs_commit_inode(inode, flags);
+	if (ret >= 0) {
+		if (wbc->sync_mode == WB_SYNC_NONE) {
+			if (ret < wbc->nr_to_write)
+				wbc->nr_to_write -= ret;
+			else
+				wbc->nr_to_write = 0;
 		}
+		return 0;
 	}
-	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&inode->i_lock);
-	do {
-		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-		if (ret != 0)
-			continue;
-		if (nocommit)
-			break;
-		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
-			break;
-		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&inode->i_lock);
-			nfs_cancel_commit_list(&head);
-			ret = pages;
-			spin_lock(&inode->i_lock);
-			continue;
-		}
-		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&inode->i_lock);
-		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&inode->i_lock);
-
-	} while (ret >= 0);
-	spin_unlock(&inode->i_lock);
+out_mark_dirty:
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	return ret;
 }
-
-static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
+#else
+static int nfs_commit_inode(struct inode *inode, int how)
 {
-	int ret;
-
-	ret = nfs_writepages(mapping, wbc);
-	if (ret < 0)
-		goto out;
-	ret = nfs_sync_mapping_wait(mapping, wbc, how);
-	if (ret < 0)
-		goto out;
 	return 0;
-out:
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return ret;
 }
 
-/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
-static int nfs_write_mapping(struct address_space *mapping, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-	};
+	return 0;
+}
+#endif
 
-	return __nfs_write_mapping(mapping, &wbc, how);
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	return nfs_commit_unstable_pages(inode, wbc);
 }
 
 /*
@@ -1499,37 +1427,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
  */
 int nfs_wb_all(struct inode *inode)
 {
-	return nfs_write_mapping(inode->i_mapping, 0);
-}
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+	};
 
-int nfs_wb_nocommit(struct inode *inode)
-{
-	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
+	return sync_inode(inode, &wbc);
 }
 
 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 {
 	struct nfs_page *req;
-	loff_t range_start = page_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
-	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = range_start,
-		.range_end = range_end,
-	};
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
 	for (;;) {
 		req = nfs_page_find_request(page);
 		if (req == NULL)
-			goto out;
-		if (test_bit(PG_CLEAN, &req->wb_flags)) {
-			nfs_release_request(req);
 			break;
-		}
 		if (nfs_lock_request_dontget(req)) {
 			nfs_inode_remove_request(req);
 			/*
@@ -1541,55 +1458,56 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 			break;
 		}
 		ret = nfs_wait_on_request(req);
+		nfs_release_request(req);
 		if (ret < 0)
-			goto out;
+			break;
 	}
-	if (!PagePrivate(page))
-		return 0;
-	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
-out:
 	return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-				int how)
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
 	loff_t range_start = page_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
+		.nr_to_write = 0,
 		.range_start = range_start,
 		.range_end = range_end,
 	};
+	struct nfs_page *req;
+	int need_commit;
 	int ret;
 
-	do {
+	while(PagePrivate(page)) {
 		if (clear_page_dirty_for_io(page)) {
 			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
-		} else if (!PagePrivate(page))
+		}
+		req = nfs_find_and_lock_request(page);
+		if (!req)
 			break;
-		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
-		if (ret < 0)
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			goto out_error;
-	} while (PagePrivate(page));
+		}
+		need_commit = test_bit(PG_CLEAN, &req->wb_flags);
+		nfs_clear_page_tag_locked(req);
+		if (need_commit) {
+			ret = nfs_commit_inode(inode, FLUSH_SYNC);
+			if (ret < 0)
+				goto out_error;
+		}
+	}
 	return 0;
 out_error:
-	__mark_inode_dirty(inode, I_DIRTY_PAGES);
 	return ret;
 }
 
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-int nfs_wb_page(struct inode *inode, struct page* page)
-{
-	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
-}
-
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 		struct page *page)
@@ -1597,8 +1515,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 	struct nfs_page *req;
 	int ret;
 
-	if (PageFsCache(page))
-		nfs_fscache_release_page(page, GFP_KERNEL);
+	nfs_fscache_release_page(page, GFP_KERNEL);
 
 	req = nfs_find_and_lock_request(page);
 	ret = PTR_ERR(req);