Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--  fs/nfs/write.c | 108
1 file changed, 54 insertions, 54 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 6f7a4af3bc46..5130eda231d7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -220,7 +220,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
                 ClearPageError(page);

 io_error:
-        nfs_end_data_update_defer(inode);
+        nfs_end_data_update(inode);
         nfs_writedata_free(wdata);
         return written ? written : result;
 }
@@ -352,7 +352,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                 if (err < 0)
                         goto out;
         }
-        err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc));
+        err = nfs_commit_inode(inode, wb_priority(wbc));
         if (err > 0) {
                 wbc->nr_to_write -= err;
                 err = 0;
@@ -401,7 +401,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
         nfsi->npages--;
         if (!nfsi->npages) {
                 spin_unlock(&nfsi->req_lock);
-                nfs_end_data_update_defer(inode);
+                nfs_end_data_update(inode);
                 iput(inode);
         } else
                 spin_unlock(&nfsi->req_lock);
@@ -446,6 +446,8 @@ nfs_mark_request_dirty(struct nfs_page *req)
         struct nfs_inode *nfsi = NFS_I(inode);

         spin_lock(&nfsi->req_lock);
+        radix_tree_tag_set(&nfsi->nfs_page_tree,
+                        req->wb_index, NFS_PAGE_TAG_DIRTY);
         nfs_list_add_request(req, &nfsi->dirty);
         nfsi->ndirty++;
         spin_unlock(&nfsi->req_lock);
@@ -503,13 +505,12 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int

         spin_lock(&nfsi->req_lock);
         next = idx_start;
-        while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+        while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
                 if (req->wb_index > idx_end)
                         break;

                 next = req->wb_index + 1;
-                if (!NFS_WBACK_BUSY(req))
-                        continue;
+                BUG_ON(!NFS_WBACK_BUSY(req));

                 atomic_inc(&req->wb_count);
                 spin_unlock(&nfsi->req_lock);
@@ -538,12 +539,15 @@ static int
 nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
-        int res;
-        res = nfs_scan_list(&nfsi->dirty, dst, idx_start, npages);
-        nfsi->ndirty -= res;
-        sub_page_state(nr_dirty,res);
-        if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
-                printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
+        int res = 0;
+
+        if (nfsi->ndirty != 0) {
+                res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
+                nfsi->ndirty -= res;
+                sub_page_state(nr_dirty,res);
+                if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
+                        printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
+        }
         return res;
 }

@@ -562,11 +566,14 @@ static int
 nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
-        int res;
-        res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
-        nfsi->ncommit -= res;
-        if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
-                printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
+        int res = 0;
+
+        if (nfsi->ncommit != 0) {
+                res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
+                nfsi->ncommit -= res;
+                if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
+                        printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
+        }
         return res;
 }
 #endif
@@ -750,7 +757,7 @@ int nfs_updatepage(struct file *file, struct page *page,
          * is entirely in cache, it may be more efficient to avoid
          * fragmenting write requests.
          */
-        if (PageUptodate(page) && inode->i_flock == NULL) {
+        if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
                 loff_t end_offs = i_size_read(inode) - 1;
                 unsigned long end_index = end_offs >> PAGE_CACHE_SHIFT;

@@ -821,7 +828,7 @@ out:
 #else
         nfs_inode_remove_request(req);
 #endif
-        nfs_unlock_request(req);
+        nfs_clear_page_writeback(req);
 }

 static inline int flush_task_priority(int how)
@@ -952,7 +959,7 @@ out_bad:
                 nfs_writedata_free(data);
         }
         nfs_mark_request_dirty(req);
-        nfs_unlock_request(req);
+        nfs_clear_page_writeback(req);
         return -ENOMEM;
 }

@@ -1002,7 +1009,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
                 struct nfs_page *req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
                 nfs_mark_request_dirty(req);
-                nfs_unlock_request(req);
+                nfs_clear_page_writeback(req);
         }
         return -ENOMEM;
 }
@@ -1029,7 +1036,7 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
                 nfs_mark_request_dirty(req);
-                nfs_unlock_request(req);
+                nfs_clear_page_writeback(req);
         }
         return error;
 }
@@ -1121,7 +1128,7 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
                 nfs_inode_remove_request(req);
 #endif
         next:
-                nfs_unlock_request(req);
+                nfs_clear_page_writeback(req);
         }
 }

@@ -1210,36 +1217,24 @@ static void nfs_commit_rpcsetup(struct list_head *head,
                 struct nfs_write_data *data, int how)
 {
         struct rpc_task *task = &data->task;
-        struct nfs_page *first, *last;
+        struct nfs_page *first;
         struct inode *inode;
-        loff_t start, end, len;

         /* Set up the RPC argument and reply structs
          * NB: take care not to mess about with data->commit et al. */

         list_splice_init(head, &data->pages);
         first = nfs_list_entry(data->pages.next);
-        last = nfs_list_entry(data->pages.prev);
         inode = first->wb_context->dentry->d_inode;

-        /*
-         * Determine the offset range of requests in the COMMIT call.
-         * We rely on the fact that data->pages is an ordered list...
-         */
-        start = req_offset(first);
-        end = req_offset(last) + last->wb_bytes;
-        len = end - start;
-        /* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
-        if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1))
-                len = 0;
-
         data->inode = inode;
         data->cred = first->wb_context->cred;

         data->args.fh = NFS_FH(data->inode);
-        data->args.offset = start;
-        data->args.count = len;
-        data->res.count = len;
+        /* Note: we always request a commit of the entire inode */
+        data->args.offset = 0;
+        data->args.count = 0;
+        data->res.count = 0;
         data->res.fattr = &data->fattr;
         data->res.verf = &data->verf;

@@ -1278,7 +1273,7 @@ nfs_commit_list(struct list_head *head, int how)
                 req = nfs_list_entry(head->next);
                 nfs_list_remove_request(req);
                 nfs_mark_request_commit(req);
-                nfs_unlock_request(req);
+                nfs_clear_page_writeback(req);
         }
         return -ENOMEM;
 }
@@ -1324,7 +1319,7 @@ nfs_commit_done(struct rpc_task *task)
                 dprintk(" mismatch\n");
                 nfs_mark_request_dirty(req);
         next:
-                nfs_unlock_request(req);
+                nfs_clear_page_writeback(req);
                 res++;
         }
         sub_page_state(nr_unstable,res);
@@ -1342,16 +1337,23 @@ static int nfs_flush_inode(struct inode *inode, unsigned long idx_start,
         spin_lock(&nfsi->req_lock);
         res = nfs_scan_dirty(inode, &head, idx_start, npages);
         spin_unlock(&nfsi->req_lock);
-        if (res)
-                error = nfs_flush_list(&head, NFS_SERVER(inode)->wpages, how);
+        if (res) {
+                struct nfs_server *server = NFS_SERVER(inode);
+
+                /* For single writes, FLUSH_STABLE is more efficient */
+                if (res == nfsi->npages && nfsi->npages <= server->wpages) {
+                        if (res > 1 || nfs_list_entry(head.next)->wb_bytes <= server->wsize)
+                                how |= FLUSH_STABLE;
+                }
+                error = nfs_flush_list(&head, server->wpages, how);
+        }
         if (error < 0)
                 return error;
         return res;
 }

 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
-                unsigned int npages, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
         LIST_HEAD(head);
@@ -1359,15 +1361,13 @@ int nfs_commit_inode(struct inode *inode, unsigned long idx_start,
                 error = 0;

         spin_lock(&nfsi->req_lock);
-        res = nfs_scan_commit(inode, &head, idx_start, npages);
+        res = nfs_scan_commit(inode, &head, 0, 0);
+        spin_unlock(&nfsi->req_lock);
         if (res) {
-                res += nfs_scan_commit(inode, &head, 0, 0);
-                spin_unlock(&nfsi->req_lock);
                 error = nfs_commit_list(&head, how);
-        } else
-                spin_unlock(&nfsi->req_lock);
-        if (error < 0)
-                return error;
+                if (error < 0)
+                        return error;
+        }
         return res;
 }
 #endif
@@ -1389,7 +1389,7 @@ int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
                 error = nfs_flush_inode(inode, idx_start, npages, how);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
                 if (error == 0)
-                        error = nfs_commit_inode(inode, idx_start, npages, how);
+                        error = nfs_commit_inode(inode, how);
 #endif
         } while (error > 0);
         return error;