Diffstat (limited to 'fs')
-rw-r--r--	fs/nfsd/vfs.c	69
1 file changed, 39 insertions, 30 deletions
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index ebf56c6040c..6ad76a4cfc0 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -963,6 +963,43 @@ static void kill_suid(struct dentry *dentry)
 	mutex_unlock(&dentry->d_inode->i_mutex);
 }
 
+/*
+ * Gathered writes: If another process is currently writing to the file,
+ * there's a high chance this is another nfsd (triggered by a bulk write
+ * from a client's biod). Rather than syncing the file with each write
+ * request, we sleep for 10 msec.
+ *
+ * I don't know if this roughly approximates C. Juszak's idea of
+ * gathered writes, but it's a nice and simple solution (IMHO), and it
+ * seems to work:-)
+ *
+ * Note: we do this only in the NFSv2 case, since v3 and higher have a
+ * better tool (separate unstable writes and commits) for solving this
+ * problem.
+ */
+static int wait_for_concurrent_writes(struct file *file)
+{
+	struct inode *inode = file->f_path.dentry->d_inode;
+	static ino_t last_ino;
+	static dev_t last_dev;
+	int err = 0;
+
+	if (atomic_read(&inode->i_writecount) > 1
+	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
+		msleep(10);
+		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
+	}
+
+	if (inode->i_state & I_DIRTY) {
+		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
+		err = nfsd_sync(file);
+	}
+	last_ino = inode->i_ino;
+	last_dev = inode->i_sb->s_dev;
+	return err;
+}
+
 static __be32
 nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 				loff_t offset, struct kvec *vec, int vlen,
@@ -1026,36 +1063,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 	if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
 		kill_suid(dentry);
 
-	if (host_err >= 0 && stable && use_wgather) {
-		static ino_t last_ino;
-		static dev_t last_dev;
-
-		/*
-		 * Gathered writes: If another process is currently
-		 * writing to the file, there's a high chance
-		 * this is another nfsd (triggered by a bulk write
-		 * from a client's biod). Rather than syncing the
-		 * file with each write request, we sleep for 10 msec.
-		 *
-		 * I don't know if this roughly approximates
-		 * C. Juszak's idea of gathered writes, but it's a
-		 * nice and simple solution (IMHO), and it seems to
-		 * work:-)
-		 */
-		if (atomic_read(&inode->i_writecount) > 1
-		    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
-			dprintk("nfsd: write defer %d\n", task_pid_nr(current));
-			msleep(10);
-			dprintk("nfsd: write resume %d\n", task_pid_nr(current));
-		}
-
-		if (inode->i_state & I_DIRTY) {
-			dprintk("nfsd: write sync %d\n", task_pid_nr(current));
-			host_err=nfsd_sync(file);
-		}
-		last_ino = inode->i_ino;
-		last_dev = inode->i_sb->s_dev;
-	}
+	if (host_err >= 0 && stable && use_wgather)
+		host_err = wait_for_concurrent_writes(file);
 
 	dprintk("nfsd: write complete host_err=%d\n", host_err);
 	if (host_err >= 0)
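
For readability, here is a condensed sketch of the resulting NFSv2 write-gathering path after this patch; it only restates what the hunks above already add, and introduces no new code. The heuristic now lives in wait_for_concurrent_writes(), and the call site in nfsd_vfs_write() shrinks to a single guarded call:

	/* Stable NFSv2 write with write-gathering enabled: wait briefly for
	 * other nfsd writers on the same inode, then sync once if the inode
	 * is still dirty. Returns the result of nfsd_sync() if a sync was
	 * performed, otherwise 0. */
	if (host_err >= 0 && stable && use_wgather)
		host_err = wait_for_concurrent_writes(file);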