Diffstat (limited to 'fs/nfs/file.c')
-rw-r--r--  fs/nfs/file.c  105
1 file changed, 80 insertions, 25 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 579cf8a7d4a7..c664bb921425 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -33,6 +33,7 @@
 #include <asm/system.h>
 
 #include "delegation.h"
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY NFSDBG_FILE
@@ -55,6 +56,8 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl);
 static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl);
 static int nfs_setlease(struct file *file, long arg, struct file_lock **fl);
 
+static struct vm_operations_struct nfs_file_vm_ops;
+
 const struct file_operations nfs_file_operations = {
         .llseek = nfs_file_llseek,
         .read = do_sync_read,
@@ -174,13 +177,38 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
 }
 
 /*
+ * Helper for nfs_file_flush() and nfs_fsync()
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
+ * disk, but it retrieves and clears ctx->error after synching, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occurred, and hence cause it to
+ * fall back to doing a synchronous write.
+ */
+static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode)
+{
+        int have_error, status;
+        int ret = 0;
+
+        have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+        status = nfs_wb_all(inode);
+        have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+        if (have_error)
+                ret = xchg(&ctx->error, 0);
+        if (!ret)
+                ret = status;
+        return ret;
+}
+
+/*
  * Flush all dirty pages, and check for write errors.
  *
  */
 static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-        struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+        struct nfs_open_context *ctx = nfs_file_open_context(file);
         struct inode *inode = file->f_path.dentry->d_inode;
         int status;
 
@@ -189,16 +217,11 @@ nfs_file_flush(struct file *file, fl_owner_t id)
         if ((file->f_mode & FMODE_WRITE) == 0)
                 return 0;
         nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
-        lock_kernel();
+
         /* Ensure that data+attribute caches are up to date after close() */
-        status = nfs_wb_all(inode);
-        if (!status) {
-                status = ctx->error;
-                ctx->error = 0;
-                if (!status)
-                        nfs_revalidate_inode(NFS_SERVER(inode), inode);
-        }
-        unlock_kernel();
+        status = nfs_do_fsync(ctx, inode);
+        if (!status)
+                nfs_revalidate_inode(NFS_SERVER(inode), inode);
         return status;
 }
 
@@ -257,8 +280,11 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
                         dentry->d_parent->d_name.name, dentry->d_name.name);
 
         status = nfs_revalidate_mapping(inode, file->f_mapping);
-        if (!status)
-                status = generic_file_mmap(file, vma);
+        if (!status) {
+                vma->vm_ops = &nfs_file_vm_ops;
+                vma->vm_flags |= VM_CAN_NONLINEAR;
+                file_accessed(file);
+        }
         return status;
 }
 
@@ -270,21 +296,13 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 static int
 nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
 {
-        struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
+        struct nfs_open_context *ctx = nfs_file_open_context(file);
         struct inode *inode = dentry->d_inode;
-        int status;
 
         dfprintk(VFS, "nfs: fsync(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
 
         nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
-        lock_kernel();
-        status = nfs_wb_all(inode);
-        if (!status) {
-                status = ctx->error;
-                ctx->error = 0;
-        }
-        unlock_kernel();
-        return status;
+        return nfs_do_fsync(ctx, inode);
 }
 
 /*
@@ -333,7 +351,7 @@ static int nfs_launder_page(struct page *page)
 const struct address_space_operations nfs_file_aops = {
         .readpage = nfs_readpage,
         .readpages = nfs_readpages,
-        .set_page_dirty = nfs_set_page_dirty,
+        .set_page_dirty = __set_page_dirty_nobuffers,
         .writepage = nfs_writepage,
         .writepages = nfs_writepages,
         .prepare_write = nfs_prepare_write,
@@ -346,6 +364,43 @@ const struct address_space_operations nfs_file_aops = {
         .launder_page = nfs_launder_page,
 };
 
+static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+        struct file *filp = vma->vm_file;
+        unsigned pagelen;
+        int ret = -EINVAL;
+
+        lock_page(page);
+        if (page->mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping)
+                goto out_unlock;
+        pagelen = nfs_page_length(page);
+        if (pagelen == 0)
+                goto out_unlock;
+        ret = nfs_prepare_write(filp, page, 0, pagelen);
+        if (!ret)
+                ret = nfs_commit_write(filp, page, 0, pagelen);
+out_unlock:
+        unlock_page(page);
+        return ret;
+}
+
+static struct vm_operations_struct nfs_file_vm_ops = {
+        .fault = filemap_fault,
+        .page_mkwrite = nfs_vm_page_mkwrite,
+};
+
+static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+{
+        struct nfs_open_context *ctx;
+
+        if (IS_SYNC(inode) || (filp->f_flags & O_SYNC))
+                return 1;
+        ctx = nfs_file_open_context(filp);
+        if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
+                return 1;
+        return 0;
+}
+
 static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
                 unsigned long nr_segs, loff_t pos)
 {
@@ -382,8 +437,8 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
         nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
         result = generic_file_aio_write(iocb, iov, nr_segs, pos);
         /* Return error values for O_SYNC and IS_SYNC() */
-        if (result >= 0 && (IS_SYNC(inode) || (iocb->ki_filp->f_flags & O_SYNC))) {
-                int err = nfs_fsync(iocb->ki_filp, dentry, 1);
+        if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
+                int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
                 if (err < 0)
                         result = err;
         }