Diffstat (limited to 'fs/nfs/file.c')
-rw-r--r--	fs/nfs/file.c	54
1 file changed, 50 insertions, 4 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 05062329b678..f5fdd39e037a 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -59,7 +59,7 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
 static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
 static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
 
-static struct vm_operations_struct nfs_file_vm_ops;
+static const struct vm_operations_struct nfs_file_vm_ops;
 
 const struct file_operations nfs_file_operations = {
 	.llseek = nfs_file_llseek,
@@ -328,6 +328,42 @@ nfs_file_fsync(struct file *file, struct dentry *dentry, int datasync)
 }
 
 /*
+ * Decide whether a read/modify/write cycle may be more efficient
+ * than a modify/write/read cycle when writing to a page in the
+ * page cache.
+ *
+ * The modify/write/read cycle may occur if a page is read before
+ * being completely filled by the writer. In this situation, the
+ * page must be completely written to stable storage on the server
+ * before it can be refilled by reading in the page from the server.
+ * This can lead to expensive, small, FILE_SYNC mode writes being
+ * done.
+ *
+ * It may be more efficient to read the page first if the file is
+ * open for reading in addition to writing, the page is not marked
+ * as Uptodate, it is not dirty or waiting to be committed,
+ * indicating that it was previously allocated and then modified,
+ * that there were valid bytes of data in that range of the file,
+ * and that the new data won't completely replace the old data in
+ * that range of the file.
+ */
+static int nfs_want_read_modify_write(struct file *file, struct page *page,
+			loff_t pos, unsigned len)
+{
+	unsigned int pglen = nfs_page_length(page);
+	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned int end = offset + len;
+
+	if ((file->f_mode & FMODE_READ) &&	/* open for read? */
+	    !PageUptodate(page) &&		/* Uptodate? */
+	    !PagePrivate(page) &&		/* i/o request already? */
+	    pglen &&				/* valid bytes of file? */
+	    (end < pglen || offset))		/* replace all valid bytes? */
+		return 1;
+	return 0;
+}
+
+/*
  * This does the "real" work of the write. We must allocate and lock the
  * page to be sent back to the generic routine, which then copies the
  * data from user space.
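For illustration only, the decision made by the new nfs_want_read_modify_write() helper can be exercised outside the kernel. The standalone C sketch below (not kernel code; it assumes a 4096-byte page, and pglen stands in for nfs_page_length()) mirrors the offset/end arithmetic and the "would any old data survive this write?" test:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096u	/* assumed page size for this sketch */

/* Returns 1 when a write of 'len' bytes at file position 'pos' would leave
 * previously valid bytes of the page untouched, so reading the page first
 * can avoid a later synchronous write-then-read round trip. */
static int want_read_first(long long pos, unsigned int len, unsigned int pglen)
{
	unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int end = offset + len;

	return pglen && (end < pglen || offset);
}

int main(void)
{
	/* Write 100 bytes at offset 50 into a page with 4096 valid bytes:
	 * old data survives on both sides, so reading first pays off. */
	printf("%d\n", want_read_first(50, 100, 4096));	/* prints 1 */

	/* Overwrite the whole page: nothing old survives, no read needed. */
	printf("%d\n", want_read_first(0, 4096, 4096));	/* prints 0 */
	return 0;
}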
@@ -340,15 +376,16 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 			struct page **pagep, void **fsdata)
 {
 	int ret;
-	pgoff_t index;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	struct page *page;
-	index = pos >> PAGE_CACHE_SHIFT;
+	int once_thru = 0;
 
 	dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name,
 		mapping->host->i_ino, len, (long long) pos);
 
+start:
 	/*
 	 * Prevent starvation issues if someone is doing a consistency
 	 * sync-to-disk
@@ -367,6 +404,13 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 	if (ret) {
 		unlock_page(page);
 		page_cache_release(page);
+	} else if (!once_thru &&
+		   nfs_want_read_modify_write(file, page, pos, len)) {
+		once_thru = 1;
+		ret = nfs_readpage(file, page);
+		page_cache_release(page);
+		if (!ret)
+			goto start;
 	}
 	return ret;
 }
@@ -479,7 +523,9 @@ const struct address_space_operations nfs_file_aops = {
 	.invalidatepage = nfs_invalidate_page,
 	.releasepage = nfs_release_page,
 	.direct_IO = nfs_direct_IO,
+	.migratepage = nfs_migrate_page,
 	.launder_page = nfs_launder_page,
+	.error_remove_page = generic_error_remove_page,
 };
 
 /*
@@ -526,7 +572,7 @@ out_unlock:
 	return VM_FAULT_SIGBUS;
 }
 
-static struct vm_operations_struct nfs_file_vm_ops = {
+static const struct vm_operations_struct nfs_file_vm_ops = {
 	.fault = filemap_fault,
 	.page_mkwrite = nfs_vm_page_mkwrite,
 };
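The control flow this patch adds to nfs_write_begin() reads the page at most once and then retries the whole grab/prepare step; the once_thru flag guarantees the goto cannot loop. The rough standalone sketch below shows that shape only; all names are illustrative stand-ins (fake_readpage() for nfs_readpage(), want_rmw() for nfs_want_read_modify_write()), not NFS interfaces:

#include <stdio.h>

struct fake_page { int uptodate; };

static int fake_readpage(struct fake_page *page)
{
	page->uptodate = 1;	/* stands in for nfs_readpage() */
	return 0;
}

static int want_rmw(const struct fake_page *page)
{
	return !page->uptodate;	/* stands in for nfs_want_read_modify_write() */
}

static int write_begin_like(struct fake_page *page)
{
	int once_thru = 0;
	int ret;

start:
	ret = 0;		/* stands in for grabbing and preparing the page */
	if (ret) {
		/* error path: would unlock and release the page here */
	} else if (!once_thru && want_rmw(page)) {
		once_thru = 1;	/* read at most once, then restart */
		ret = fake_readpage(page);
		if (!ret)
			goto start;
	}
	return ret;
}

int main(void)
{
	struct fake_page page = { .uptodate = 0 };

	printf("ret = %d, uptodate = %d\n", write_begin_like(&page), page.uptodate);
	return 0;
}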