path: root/fs/nfs/write.c
author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-05 00:35:42 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-06 10:46:40 -0500
commit     61822ab5e3ed09fcfc49e37227b655202adf6130 (patch)
tree       224a470689463469672ed38b0da3d9f47bc8a245 /fs/nfs/write.c
parent     e261f51f25b98c213e0b3d7f2109b117d714f69d (diff)
NFS: Ensure we only call set_page_writeback() under the page lock
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--  fs/nfs/write.c | 38 ++++++++++++++++++++++++++++----------
1 file changed, 28 insertions(+), 10 deletions(-)
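In outline, the change moves the set_page_writeback() call out of the RPC send paths (nfs_flush_multi() and nfs_flush_one()) and into nfs_page_mark_flush(), which, per the subject line, runs with the page locked; the error and retry paths switch from nfs_mark_request_dirty() to a new nfs_redirty_request() helper that also clears PG_FLUSHING. A minimal sketch of the resulting pattern, condensed from the hunks below (elided with "...", not a complete listing of either function):

	static int nfs_page_mark_flush(struct page *page)
	{
		...
		if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
			nfs_mark_request_dirty(req);
			set_page_writeback(page);	/* page lock held by caller */
		}
		...
	}

	static void nfs_redirty_request(struct nfs_page *req)
	{
		/* drop the flush state and re-dirty the page so it is written again */
		clear_bit(PG_FLUSHING, &req->wb_flags);
		__set_page_dirty_nobuffers(req->wb_page);
	}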
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 130528d09a26..bd4dff9dbd69 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -81,7 +81,6 @@ static void nfs_mark_request_dirty(struct nfs_page *req);
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
-static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
@@ -280,8 +279,10 @@ static int nfs_page_mark_flush(struct page *page)
 		spin_lock(req_lock);
 	}
 	spin_unlock(req_lock);
-	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0)
+	if (test_and_set_bit(PG_FLUSHING, &req->wb_flags) == 0) {
 		nfs_mark_request_dirty(req);
+		set_page_writeback(page);
+	}
 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfs_unlock_request(req);
 	return ret;
@@ -443,6 +444,13 @@ nfs_mark_request_dirty(struct nfs_page *req)
 	mark_inode_dirty(inode);
 }
 
+static void
+nfs_redirty_request(struct nfs_page *req)
+{
+	clear_bit(PG_FLUSHING, &req->wb_flags);
+	__set_page_dirty_nobuffers(req->wb_page);
+}
+
 /*
  * Check if a request is dirty
  */
@@ -777,7 +785,7 @@ static void nfs_writepage_release(struct nfs_page *req)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 	if (!PageError(req->wb_page)) {
 		if (NFS_NEED_RESCHED(req)) {
-			nfs_mark_request_dirty(req);
+			nfs_redirty_request(req);
 			goto out;
 		} else if (NFS_NEED_COMMIT(req)) {
 			nfs_mark_request_commit(req);
@@ -893,7 +901,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
 	atomic_set(&req->wb_complete, requests);
 
 	ClearPageError(page);
-	set_page_writeback(page);
 	offset = 0;
 	nbytes = req->wb_bytes;
 	do {
@@ -923,7 +930,7 @@ out_bad:
 		list_del(&data->pages);
 		nfs_writedata_release(data);
 	}
-	nfs_mark_request_dirty(req);
+	nfs_redirty_request(req);
 	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
@@ -954,7 +961,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, &data->pages);
 		ClearPageError(req->wb_page);
-		set_page_writeback(req->wb_page);
 		*pages++ = req->wb_page;
 		count += req->wb_bytes;
 	}
@@ -969,7 +975,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 	while (!list_empty(head)) {
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
@@ -1004,7 +1010,7 @@ out_err:
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 		nfs_clear_page_writeback(req);
 	}
 	return error;
@@ -1320,7 +1326,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 		}
 		/* We have a mismatch. Write the page again */
 		dprintk(" mismatch\n");
-		nfs_mark_request_dirty(req);
+		nfs_redirty_request(req);
 	next:
 		nfs_clear_page_writeback(req);
 	}
@@ -1451,13 +1457,18 @@ int nfs_wb_all(struct inode *inode)
 		.bdi = mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = LONG_MAX,
+		.for_writepages = 1,
 		.range_cyclic = 1,
 	};
 	int ret;
 
+	ret = generic_writepages(mapping, &wbc);
+	if (ret < 0)
+		goto out;
 	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
 	if (ret >= 0)
 		return 0;
+out:
 	return ret;
 }
 
@@ -1469,16 +1480,23 @@ int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, lo
 		.nr_to_write = LONG_MAX,
 		.range_start = range_start,
 		.range_end = range_end,
+		.for_writepages = 1,
 	};
 	int ret;
 
+	if (!(how & FLUSH_NOWRITEPAGE)) {
+		ret = generic_writepages(mapping, &wbc);
+		if (ret < 0)
+			goto out;
+	}
 	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
 	if (ret >= 0)
 		return 0;
+out:
 	return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
 {
 	loff_t range_start = page_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);