author     Linus Torvalds <torvalds@linux-foundation.org>  2008-07-16 17:49:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-07-16 17:49:49 -0400
commit     8df1b049bc86495a40e421abc8b9cf1dda32f0d9 (patch)
tree       ed0d7f582b401852a9ea98f572076131950a15c4 /fs/nfs/write.c
parent     a3cf859321486f69506326146ab3e2fd15c05c3f (diff)
parent     cadc723cc19ce6b881d973d3c04e25ebb83058e6 (diff)
Merge git://git.linux-nfs.org/projects/trondmy/nfs-2.6
* git://git.linux-nfs.org/projects/trondmy/nfs-2.6: (82 commits)
NFSv4: Remove BKL from the nfsv4 state recovery
SUNRPC: Remove the BKL from the callback functions
NFS: Remove BKL from the readdir code
NFS: Remove BKL from the symlink code
NFS: Remove BKL from the sillydelete operations
NFS: Remove the BKL from the rename, rmdir and unlink operations
NFS: Remove BKL from NFS lookup code
NFS: Remove the BKL from nfs_link()
NFS: Remove the BKL from the inode creation operations
NFS: Remove BKL usage from open()
NFS: Remove BKL usage from the write path
NFS: Remove the BKL from the permission checking code
NFS: Remove attribute update related BKL references
NFS: Remove BKL requirement from attribute updates
NFS: Protect inode->i_nlink updates using inode->i_lock
nfs: set correct fl_len in nlmclnt_test()
SUNRPC: Support registering IPv6 interfaces with local rpcbind daemon
SUNRPC: Refactor rpcb_register to make rpcbindv4 support easier
SUNRPC: None of rpcb_create's callers wants a privileged source port
SUNRPC: Introduce a specific rpcb_create for contacting localhost
...
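The bulk of this series removes Big Kernel Lock (lock_kernel()/unlock_kernel()) usage from the NFS client by relying on locks that already protect the data being touched, most often the per-inode inode->i_lock; the fs/nfs/write.c diff below applies the same idea to the cached file size in nfs_grow_file(). A minimal sketch of that pattern, using a hypothetical helper name that is not part of this merge:

```c
#include <linux/fs.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch only -- not code from this merge. Extend the
 * cached file size under inode->i_lock instead of depending on the
 * Big Kernel Lock; nfs_grow_file() in the diff below has this shape.
 */
static void example_extend_cached_size(struct inode *inode, loff_t new_end)
{
        spin_lock(&inode->i_lock);
        if (new_end > i_size_read(inode))   /* re-check the size under the lock */
                i_size_write(inode, new_end);
        spin_unlock(&inode->i_lock);
}
```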
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--   fs/nfs/write.c   322
1 files changed, 157 insertions, 165 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f333848fd3be..3229e217c773 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -34,9 +34,6 @@
 /*
  * Local function declarations
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context*,
-                                            struct page *,
-                                            unsigned int, unsigned int);
 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                   struct inode *inode, int ioflags);
 static void nfs_redirty_request(struct nfs_page *req);
@@ -136,16 +133,21 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
         struct inode *inode = page->mapping->host;
-        loff_t end, i_size = i_size_read(inode);
-        pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+        loff_t end, i_size;
+        pgoff_t end_index;
 
+        spin_lock(&inode->i_lock);
+        i_size = i_size_read(inode);
+        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
         if (i_size > 0 && page->index < end_index)
-                return;
+                goto out;
         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
         if (i_size >= end)
-                return;
-        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+                goto out;
         i_size_write(inode, end);
+        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+out:
+        spin_unlock(&inode->i_lock);
 }
 
 /* A writeback failed: mark the page as bad, and invalidate the page cache */
@@ -169,29 +171,6 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
         SetPageUptodate(page);
 }
 
-static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
-                unsigned int offset, unsigned int count)
-{
-        struct nfs_page *req;
-        int ret;
-
-        for (;;) {
-                req = nfs_update_request(ctx, page, offset, count);
-                if (!IS_ERR(req))
-                        break;
-                ret = PTR_ERR(req);
-                if (ret != -EBUSY)
-                        return ret;
-                ret = nfs_wb_page(page->mapping->host, page);
-                if (ret != 0)
-                        return ret;
-        }
-        /* Update file length */
-        nfs_grow_file(page, offset, count);
-        nfs_clear_page_tag_locked(req);
-        return 0;
-}
-
 static int wb_priority(struct writeback_control *wbc)
 {
         if (wbc->for_reclaim)
@@ -268,12 +247,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                         return ret;
                 spin_lock(&inode->i_lock);
         }
-        if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
-                /* This request is marked for commit */
+        if (test_bit(PG_CLEAN, &req->wb_flags)) {
                 spin_unlock(&inode->i_lock);
-                nfs_clear_page_tag_locked(req);
-                nfs_pageio_complete(pgio);
-                return 0;
+                BUG();
         }
         if (nfs_set_page_writeback(page) != 0) {
                 spin_unlock(&inode->i_lock);
@@ -355,11 +331,19 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 /*
  * Insert a write request into an inode
  */
-static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 {
         struct nfs_inode *nfsi = NFS_I(inode);
         int error;
 
+        error = radix_tree_preload(GFP_NOFS);
+        if (error != 0)
+                goto out;
+
+        /* Lock the request! */
+        nfs_lock_request_dontget(req);
+
+        spin_lock(&inode->i_lock);
         error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
         BUG_ON(error);
         if (!nfsi->npages) {
@@ -373,6 +357,10 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
         kref_get(&req->wb_kref);
         radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                                 NFS_PAGE_TAG_LOCKED);
+        spin_unlock(&inode->i_lock);
+        radix_tree_preload_end();
+out:
+        return error;
 }
 
 /*
@@ -405,19 +393,6 @@ nfs_mark_request_dirty(struct nfs_page *req)
         __set_page_dirty_nobuffers(req->wb_page);
 }
 
-/*
- * Check if a request is dirty
- */
-static inline int
-nfs_dirty_request(struct nfs_page *req)
-{
-        struct page *page = req->wb_page;
-
-        if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
-                return 0;
-        return !PageWriteback(page);
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 /*
  * Add a request to the inode's commit list.
@@ -430,7 +405,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 
         spin_lock(&inode->i_lock);
         nfsi->ncommit++;
-        set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
+        set_bit(PG_CLEAN, &(req)->wb_flags);
         radix_tree_tag_set(&nfsi->nfs_page_tree,
                         req->wb_index,
                         NFS_PAGE_TAG_COMMIT);
@@ -440,6 +415,19 @@ nfs_mark_request_commit(struct nfs_page *req)
         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 
+static int
+nfs_clear_request_commit(struct nfs_page *req)
+{
+        struct page *page = req->wb_page;
+
+        if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
+                dec_zone_page_state(page, NR_UNSTABLE_NFS);
+                dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+                return 1;
+        }
+        return 0;
+}
+
 static inline
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
@@ -449,7 +437,7 @@ int nfs_write_need_commit(struct nfs_write_data *data)
 static inline
 int nfs_reschedule_unstable_write(struct nfs_page *req)
 {
-        if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+        if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                 nfs_mark_request_commit(req);
                 return 1;
         }
@@ -465,6 +453,12 @@ nfs_mark_request_commit(struct nfs_page *req)
 {
 }
 
+static inline int
+nfs_clear_request_commit(struct nfs_page *req)
+{
+        return 0;
+}
+
 static inline
 int nfs_write_need_commit(struct nfs_write_data *data)
 {
@@ -522,11 +516,8 @@ static void nfs_cancel_commit_list(struct list_head *head)
 
         while(!list_empty(head)) {
                 req = nfs_list_entry(head->next);
-                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-                                BDI_RECLAIMABLE);
                 nfs_list_remove_request(req);
-                clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
+                nfs_clear_request_commit(req);
                 nfs_inode_remove_request(req);
                 nfs_unlock_request(req);
         }
@@ -564,110 +555,124 @@ static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pg
 #endif
 
 /*
- * Try to update any existing write request, or create one if there is none.
- * In order to match, the request's credentials must match those of
- * the calling process.
+ * Search for an existing write request, and attempt to update
+ * it to reflect a new dirty region on a given page.
  *
- * Note: Should always be called with the Page Lock held!
+ * If the attempt fails, then the existing request is flushed out
+ * to disk.
  */
-static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
-                struct page *page, unsigned int offset, unsigned int bytes)
+static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
+                struct page *page,
+                unsigned int offset,
+                unsigned int bytes)
 {
-        struct address_space *mapping = page->mapping;
-        struct inode *inode = mapping->host;
-        struct nfs_page *req, *new = NULL;
-        pgoff_t rqend, end;
+        struct nfs_page *req;
+        unsigned int rqend;
+        unsigned int end;
+        int error;
+
+        if (!PagePrivate(page))
+                return NULL;
 
         end = offset + bytes;
+        spin_lock(&inode->i_lock);
 
         for (;;) {
-                /* Loop over all inode entries and see if we find
-                 * A request for the page we wish to update
+                req = nfs_page_find_request_locked(page);
+                if (req == NULL)
+                        goto out_unlock;
+
+                rqend = req->wb_offset + req->wb_bytes;
+                /*
+                 * Tell the caller to flush out the request if
+                 * the offsets are non-contiguous.
+                 * Note: nfs_flush_incompatible() will already
+                 * have flushed out requests having wrong owners.
                  */
-                if (new) {
-                        if (radix_tree_preload(GFP_NOFS)) {
-                                nfs_release_request(new);
-                                return ERR_PTR(-ENOMEM);
-                        }
-                }
+                if (offset > rqend
+                    || end < req->wb_offset)
+                        goto out_flushme;
 
-                spin_lock(&inode->i_lock);
-                req = nfs_page_find_request_locked(page);
-                if (req) {
-                        if (!nfs_set_page_tag_locked(req)) {
-                                int error;
-
-                                spin_unlock(&inode->i_lock);
-                                error = nfs_wait_on_request(req);
-                                nfs_release_request(req);
-                                if (error < 0) {
-                                        if (new) {
-                                                radix_tree_preload_end();
-                                                nfs_release_request(new);
-                                        }
-                                        return ERR_PTR(error);
-                                }
-                                continue;
-                        }
-                        spin_unlock(&inode->i_lock);
-                        if (new) {
-                                radix_tree_preload_end();
-                                nfs_release_request(new);
-                        }
+                if (nfs_set_page_tag_locked(req))
                         break;
-                }
 
-                if (new) {
-                        nfs_lock_request_dontget(new);
-                        nfs_inode_add_request(inode, new);
-                        spin_unlock(&inode->i_lock);
-                        radix_tree_preload_end();
-                        req = new;
-                        goto zero_page;
-                }
+                /* The request is locked, so wait and then retry */
                 spin_unlock(&inode->i_lock);
-
-                new = nfs_create_request(ctx, inode, page, offset, bytes);
-                if (IS_ERR(new))
-                        return new;
+                error = nfs_wait_on_request(req);
+                nfs_release_request(req);
+                if (error != 0)
+                        goto out_err;
+                spin_lock(&inode->i_lock);
         }
 
-        /* We have a request for our page.
-         * If the creds don't match, or the
-         * page addresses don't match,
-         * tell the caller to wait on the conflicting
-         * request.
-         */
-        rqend = req->wb_offset + req->wb_bytes;
-        if (req->wb_context != ctx
-            || req->wb_page != page
-            || !nfs_dirty_request(req)
-            || offset > rqend || end < req->wb_offset) {
-                nfs_clear_page_tag_locked(req);
-                return ERR_PTR(-EBUSY);
-        }
+        if (nfs_clear_request_commit(req))
+                radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+                                req->wb_index, NFS_PAGE_TAG_COMMIT);
 
         /* Okay, the request matches. Update the region */
         if (offset < req->wb_offset) {
                 req->wb_offset = offset;
                 req->wb_pgbase = offset;
-                req->wb_bytes = max(end, rqend) - req->wb_offset;
-                goto zero_page;
         }
-
         if (end > rqend)
                 req->wb_bytes = end - req->wb_offset;
-
+        else
+                req->wb_bytes = rqend - req->wb_offset;
+out_unlock:
+        spin_unlock(&inode->i_lock);
         return req;
-zero_page:
-        /* If this page might potentially be marked as up to date,
-         * then we need to zero any uninitalised data. */
-        if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
-                        && !PageUptodate(req->wb_page))
-                zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
+out_flushme:
+        spin_unlock(&inode->i_lock);
+        nfs_release_request(req);
+        error = nfs_wb_page(inode, page);
+out_err:
+        return ERR_PTR(error);
+}
+
+/*
+ * Try to update an existing write request, or create one if there is none.
+ *
+ * Note: Should always be called with the Page Lock held to prevent races
+ * if we have to add a new request. Also assumes that the caller has
+ * already called nfs_flush_incompatible() if necessary.
+ */
+static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
+                struct page *page, unsigned int offset, unsigned int bytes)
+{
+        struct inode *inode = page->mapping->host;
+        struct nfs_page *req;
+        int error;
+
+        req = nfs_try_to_update_request(inode, page, offset, bytes);
+        if (req != NULL)
+                goto out;
+        req = nfs_create_request(ctx, inode, page, offset, bytes);
+        if (IS_ERR(req))
+                goto out;
+        error = nfs_inode_add_request(inode, req);
+        if (error != 0) {
+                nfs_release_request(req);
+                req = ERR_PTR(error);
+        }
+out:
         return req;
 }
 
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
+                unsigned int offset, unsigned int count)
+{
+        struct nfs_page *req;
+
+        req = nfs_setup_write_request(ctx, page, offset, count);
+        if (IS_ERR(req))
+                return PTR_ERR(req);
+        /* Update file length */
+        nfs_grow_file(page, offset, count);
+        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+        nfs_clear_page_tag_locked(req);
+        return 0;
+}
+
 int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = nfs_file_open_context(file);
@@ -685,8 +690,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
                 req = nfs_page_find_request(page);
                 if (req == NULL)
                         return 0;
-                do_flush = req->wb_page != page || req->wb_context != ctx
-                        || !nfs_dirty_request(req);
+                do_flush = req->wb_page != page || req->wb_context != ctx;
                 nfs_release_request(req);
                 if (!do_flush)
                         return 0;
@@ -721,10 +725,10 @@ int nfs_updatepage(struct file *file, struct page *page,
 
         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
 
-        dprintk("NFS: nfs_updatepage(%s/%s %d@%Ld)\n",
+        dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
                 file->f_path.dentry->d_parent->d_name.name,
                 file->f_path.dentry->d_name.name, count,
-                (long long)(page_offset(page) +offset));
+                (long long)(page_offset(page) + offset));
 
         /* If we're not using byte range locks, and we know the page
          * is up to date, it may be more efficient to extend the write
@@ -744,7 +748,7 @@ int nfs_updatepage(struct file *file, struct page *page,
         else
                 __set_page_dirty_nobuffers(page);
 
-        dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
+        dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
                         status, (long long)i_size_read(inode));
         return status;
 }
@@ -752,12 +756,7 @@ int nfs_updatepage(struct file *file, struct page *page,
 static void nfs_writepage_release(struct nfs_page *req)
 {
 
-        if (PageError(req->wb_page)) {
-                nfs_end_page_writeback(req->wb_page);
-                nfs_inode_remove_request(req);
-        } else if (!nfs_reschedule_unstable_write(req)) {
-                /* Set the PG_uptodate flag */
-                nfs_mark_uptodate(req->wb_page, req->wb_pgbase, req->wb_bytes);
+        if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
                 nfs_end_page_writeback(req->wb_page);
                 nfs_inode_remove_request(req);
         } else
@@ -834,7 +833,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
         NFS_PROTO(inode)->write_setup(data, &msg);
 
         dprintk("NFS: %5u initiated write call "
-                "(req %s/%Ld, %u bytes @ offset %Lu)\n",
+                "(req %s/%lld, %u bytes @ offset %llu)\n",
                 data->task.tk_pid,
                 inode->i_sb->s_id,
                 (long long)NFS_FILEID(inode),
@@ -978,13 +977,13 @@ static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
 {
         struct nfs_write_data *data = calldata;
-        struct nfs_page *req = data->req;
 
-        dprintk("NFS: write (%s/%Ld %d@%Ld)",
-                req->wb_context->path.dentry->d_inode->i_sb->s_id,
-                (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
-                req->wb_bytes,
-                (long long)req_offset(req));
+        dprintk("NFS: %5u write(%s/%lld %d@%lld)",
+                task->tk_pid,
+                data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
+                (long long)
+                NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
+                data->req->wb_bytes, (long long)req_offset(data->req));
 
         nfs_writeback_done(task, data);
 }
@@ -1058,7 +1057,8 @@ static void nfs_writeback_release_full(void *calldata)
 
                 nfs_list_remove_request(req);
 
-                dprintk("NFS: write (%s/%Ld %d@%Ld)",
+                dprintk("NFS: %5u write (%s/%lld %d@%lld)",
+                        data->task.tk_pid,
                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                         req->wb_bytes,
@@ -1078,8 +1078,6 @@ static void nfs_writeback_release_full(void *calldata)
                         dprintk(" marked for commit\n");
                         goto next;
                 }
-                /* Set the PG_uptodate flag? */
-                nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
                 dprintk(" OK\n");
 remove_request:
                 nfs_end_page_writeback(page);
@@ -1133,7 +1131,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
                 static unsigned long complain;
 
                 if (time_before(complain, jiffies)) {
-                        dprintk("NFS: faulty NFS server %s:"
+                        dprintk("NFS: faulty NFS server %s:"
                                 " (committed = %d) != (stable = %d)\n",
                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
                                 resp->verf->committed, argp->stable);
@@ -1297,12 +1295,9 @@ static void nfs_commit_release(void *calldata)
         while (!list_empty(&data->pages)) {
                 req = nfs_list_entry(data->pages.next);
                 nfs_list_remove_request(req);
-                clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
-                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
-                                BDI_RECLAIMABLE);
+                nfs_clear_request_commit(req);
 
-                dprintk("NFS: commit (%s/%Ld %d@%Ld)",
+                dprintk("NFS: commit (%s/%lld %d@%lld)",
                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                         req->wb_bytes,
@@ -1318,9 +1313,6 @@ static void nfs_commit_release(void *calldata)
                  * returned by the server against all stored verfs. */
                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
                         /* We have a match */
-                        /* Set the PG_uptodate flag */
-                        nfs_mark_uptodate(req->wb_page, req->wb_pgbase,
-                                        req->wb_bytes);
                         nfs_inode_remove_request(req);
                         dprintk(" OK\n");
                         goto next;
@@ -1479,7 +1471,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
                 req = nfs_page_find_request(page);
                 if (req == NULL)
                         goto out;
-                if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+                if (test_bit(PG_CLEAN, &req->wb_flags)) {
                         nfs_release_request(req);
                         break;
                 }
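
The reworked nfs_inode_add_request() above calls radix_tree_preload(GFP_NOFS) before taking inode->i_lock, so the radix_tree_insert() performed under the spinlock cannot fail for lack of memory. A minimal sketch of that preload/insert/preload_end pattern, using hypothetical names (example_tree, example_lock, example_insert) rather than anything from this file:

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Hypothetical tree and lock, for illustration only. */
static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
        int error;

        /* Allocate radix-tree nodes up front, while sleeping is still allowed. */
        error = radix_tree_preload(GFP_NOFS);
        if (error != 0)
                return error;

        spin_lock(&example_lock);
        /* Cannot fail with -ENOMEM here: nodes come from the preloaded per-CPU pool. */
        error = radix_tree_insert(&example_tree, index, item);
        spin_unlock(&example_lock);

        /* Re-enable preemption and release any unused preloaded nodes. */
        radix_tree_preload_end();
        return error;
}
```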
