Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/client.c      |   3
-rw-r--r--  fs/nfs/dir.c         |  25
-rw-r--r--  fs/nfs/direct.c      |   5
-rw-r--r--  fs/nfs/inode.c       |   3
-rw-r--r--  fs/nfs/internal.h    |  12
-rw-r--r--  fs/nfs/mount_clnt.c  |   7
-rw-r--r--  fs/nfs/nfs2xdr.c     |   7
-rw-r--r--  fs/nfs/nfs3xdr.c     |  13
-rw-r--r--  fs/nfs/nfs4proc.c    |   3
-rw-r--r--  fs/nfs/nfs4xdr.c     |   7
-rw-r--r--  fs/nfs/nfsroot.c     |   2
-rw-r--r--  fs/nfs/pagelist.c    | 242
-rw-r--r--  fs/nfs/read.c        |  92
-rw-r--r--  fs/nfs/super.c       |  10
-rw-r--r--  fs/nfs/symlink.c     |   6
-rw-r--r--  fs/nfs/write.c       | 421
16 files changed, 406 insertions, 452 deletions
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2190e6c2792e..5bd03b97002e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -618,7 +618,8 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat
 	if (clp->cl_nfsversion == 3) {
 		if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
 			server->namelen = NFS3_MAXNAMLEN;
-		server->caps |= NFS_CAP_READDIRPLUS;
+		if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+			server->caps |= NFS_CAP_READDIRPLUS;
 	} else {
 		if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
 			server->namelen = NFS2_MAXNAMLEN;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index cd3469720cbf..625d8e5fb39d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -154,6 +154,8 @@ typedef struct {
 	decode_dirent_t	decode;
 	int		plus;
 	int		error;
+	unsigned long	timestamp;
+	int		timestamp_valid;
 } nfs_readdir_descriptor_t;
 
 /* Now we cache directories properly, by stuffing the dirent
@@ -195,6 +197,8 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
 		}
 		goto error;
 	}
+	desc->timestamp = timestamp;
+	desc->timestamp_valid = 1;
 	SetPageUptodate(page);
 	spin_lock(&inode->i_lock);
 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
@@ -225,6 +229,10 @@ int dir_decode(nfs_readdir_descriptor_t *desc)
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 	desc->ptr = p;
+	if (desc->timestamp_valid)
+		desc->entry->fattr->time_start = desc->timestamp;
+	else
+		desc->entry->fattr->valid &= ~NFS_ATTR_FATTR;
 	return 0;
 }
 
@@ -316,14 +324,16 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
 			__FUNCTION__, desc->page_index,
 			(long long) *desc->dir_cookie);
 
+	/* If we find the page in the page_cache, we cannot be sure
+	 * how fresh the data is, so we will ignore readdir_plus attributes.
+	 */
+	desc->timestamp_valid = 0;
 	page = read_cache_page(inode->i_mapping, desc->page_index,
 			(filler_t *)nfs_readdir_filler, desc);
 	if (IS_ERR(page)) {
 		status = PTR_ERR(page);
 		goto out;
 	}
-	if (!PageUptodate(page))
-		goto read_error;
 
 	/* NOTE: Someone else may have changed the READDIRPLUS flag */
 	desc->page = page;
@@ -337,9 +347,6 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
  out:
 	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, status);
 	return status;
- read_error:
-	page_cache_release(page);
-	return -EIO;
 }
 
 /*
@@ -468,6 +475,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
 	struct rpc_cred	*cred = nfs_file_cred(file);
 	struct page *page = NULL;
 	int status;
+	unsigned long timestamp;
 
 	dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
 			(unsigned long long)*desc->dir_cookie);
@@ -477,6 +485,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
 		status = -ENOMEM;
 		goto out;
 	}
+	timestamp = jiffies;
 	desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie,
 						page,
 						NFS_SERVER(inode)->dtsize,
@@ -487,6 +496,8 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
 	desc->page = page;
 	desc->ptr = kmap(page);	/* matching kunmap in nfs_do_filldir */
 	if (desc->error >= 0) {
+		desc->timestamp = timestamp;
+		desc->timestamp_valid = 1;
 		if ((status = dir_decode(desc)) == 0)
 			desc->entry->prev_cookie = *desc->dir_cookie;
 	} else
@@ -849,6 +860,10 @@ static int nfs_dentry_delete(struct dentry *dentry)
 static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
 {
 	nfs_inode_return_delegation(inode);
+	if (S_ISDIR(inode->i_mode))
+		/* drop any readdir cache as it could easily be old */
+		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+
 	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
 		lock_kernel();
 		drop_nlink(inode);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 2877744cb606..889de60f8a84 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -54,6 +54,7 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 
+#include "internal.h"
 #include "iostat.h"
 
 #define NFSDBG_FACILITY		NFSDBG_VFS
@@ -271,7 +272,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		bytes = min(rsize,count);
 
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(pgbase + bytes);
+		data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
 		if (unlikely(!data))
 			break;
 
@@ -602,7 +603,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		bytes = min(wsize,count);
 
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(pgbase + bytes);
+		data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
 		if (unlikely(!data))
 			break;
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 44aa9b726573..1e9a915d1fea 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1167,8 +1167,7 @@ static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flag
 {
 	struct nfs_inode *nfsi = (struct nfs_inode *) foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR) {
+	if (flags & SLAB_CTOR_CONSTRUCTOR) {
 		inode_init_once(&nfsi->vfs_inode);
 		spin_lock_init(&nfsi->req_lock);
 		INIT_LIST_HEAD(&nfsi->dirty);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 6610f2b02077..ad2b40db1e65 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -231,3 +231,15 @@ unsigned int nfs_page_length(struct page *page)
 	}
 	return 0;
 }
+
+/*
+ * Determine the number of pages in an array of length 'len' and
+ * with a base offset of 'base'
+ */
+static inline
+unsigned int nfs_page_array_len(unsigned int base, size_t len)
+{
+	return ((unsigned long)len + (unsigned long)base +
+			PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
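
The allocators in direct.c, read.c and write.c now take a page count rather than a byte length, computed with this helper. A standalone worked example of the arithmetic, assuming 4096-byte pages (the real PAGE_SIZE is architecture dependent):

/* Worked example of nfs_page_array_len(): a buffer that starts 512 bytes
 * into a page and spans 8192 bytes touches three pages.  Userspace copy
 * for illustration only; the kernel version is the one shown above. */
#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)	/* assume 4096-byte pages */

static inline unsigned int nfs_page_array_len(unsigned int base, size_t len)
{
	return ((unsigned long)len + (unsigned long)base +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	assert(nfs_page_array_len(0, 4096) == 1);	/* exactly one page */
	assert(nfs_page_array_len(512, 8192) == 3);	/* straddles a third page */
	assert(nfs_page_array_len(0, 1) == 1);		/* never 0 for len > 0 */
	return 0;
}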
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index f75fe72b4160..ca5a266a3140 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -133,13 +133,15 @@ xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res)
 
 #define MNT_dirpath_sz		(1 + 256)
 #define MNT_fhstatus_sz		(1 + 8)
+#define MNT_fhstatus3_sz	(1 + 16)
 
 static struct rpc_procinfo mnt_procedures[] = {
 [MNTPROC_MNT] = {
 	.p_proc		= MNTPROC_MNT,
 	.p_encode	= (kxdrproc_t) xdr_encode_dirpath,
 	.p_decode	= (kxdrproc_t) xdr_decode_fhstatus,
-	.p_bufsiz	= MNT_dirpath_sz << 2,
+	.p_arglen	= MNT_dirpath_sz,
+	.p_replen	= MNT_fhstatus_sz,
 	.p_statidx	= MNTPROC_MNT,
 	.p_name		= "MOUNT",
 	},
@@ -150,7 +152,8 @@ static struct rpc_procinfo mnt3_procedures[] = {
 	.p_proc		= MOUNTPROC3_MNT,
 	.p_encode	= (kxdrproc_t) xdr_encode_dirpath,
 	.p_decode	= (kxdrproc_t) xdr_decode_fhstatus3,
-	.p_bufsiz	= MNT_dirpath_sz << 2,
+	.p_arglen	= MNT_dirpath_sz,
+	.p_replen	= MNT_fhstatus3_sz,
 	.p_statidx	= MOUNTPROC3_MNT,
 	.p_name		= "MOUNT",
 	},
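
The *_sz macros count 32-bit XDR words, which is why the removed p_bufsiz values carried a "<< 2" to convert words to bytes; with separate p_arglen/p_replen fields that conversion moves into the RPC client, which is outside this diff. A standalone sanity check of the MOUNT sizes, with the word-to-byte conversion shown explicitly as an assumption about how the buffers end up being sized:

/* The macros count 32-bit XDR words; "<< 2" turns words into bytes.
 * Standalone arithmetic only; how the RPC layer combines p_arglen and
 * p_replen when sizing buffers is assumed, not shown in this diff. */
#include <stdio.h>

#define MNT_dirpath_sz		(1 + 256)	/* length word + 1024 bytes of path */
#define MNT_fhstatus_sz		(1 + 8)		/* status word + 32-byte NFSv2 file handle */
#define MNT_fhstatus3_sz	(1 + 16)	/* status word + 64-byte NFSv3 file handle (new macro) */

int main(void)
{
	printf("MNT args : %d words = %d bytes\n",
	       MNT_dirpath_sz, MNT_dirpath_sz << 2);
	printf("MNT reply: v2 %d words = %d bytes, v3 %d words = %d bytes\n",
	       MNT_fhstatus_sz, MNT_fhstatus_sz << 2,
	       MNT_fhstatus3_sz, MNT_fhstatus3_sz << 2);
	return 0;
}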
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 3be4e72a0227..abd9f8b48943 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -687,16 +687,13 @@ nfs_stat_to_errno(int stat)
 	return nfs_errtbl[i].errno;
 }
 
-#ifndef MAX
-# define MAX(a, b) (((a) > (b))? (a) : (b))
-#endif
-
 #define PROC(proc, argtype, restype, timer) \
 [NFSPROC_##proc] = { \
 	.p_proc = NFSPROC_##proc, \
 	.p_encode = (kxdrproc_t) nfs_xdr_##argtype, \
 	.p_decode = (kxdrproc_t) nfs_xdr_##restype, \
-	.p_bufsiz = MAX(NFS_##argtype##_sz,NFS_##restype##_sz) << 2, \
+	.p_arglen = NFS_##argtype##_sz, \
+	.p_replen = NFS_##restype##_sz, \
 	.p_timer = timer, \
 	.p_statidx = NFSPROC_##proc, \
 	.p_name = #proc, \
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 0ace092d126f..b51df8eb9f01 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1102,16 +1102,13 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
 }
 #endif /* CONFIG_NFS_V3_ACL */
 
-#ifndef MAX
-# define MAX(a, b) (((a) > (b))? (a) : (b))
-#endif
-
 #define PROC(proc, argtype, restype, timer) \
 [NFS3PROC_##proc] = { \
 	.p_proc = NFS3PROC_##proc, \
 	.p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \
 	.p_decode = (kxdrproc_t) nfs3_xdr_##restype, \
-	.p_bufsiz = MAX(NFS3_##argtype##_sz,NFS3_##restype##_sz) << 2, \
+	.p_arglen = NFS3_##argtype##_sz, \
+	.p_replen = NFS3_##restype##_sz, \
 	.p_timer = timer, \
 	.p_statidx = NFS3PROC_##proc, \
 	.p_name = #proc, \
@@ -1153,7 +1150,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
 	.p_proc = ACLPROC3_GETACL,
 	.p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
 	.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
-	.p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2,
+	.p_arglen = ACL3_getaclargs_sz,
+	.p_replen = ACL3_getaclres_sz,
 	.p_timer = 1,
 	.p_name = "GETACL",
 },
@@ -1161,7 +1159,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = {
 	.p_proc = ACLPROC3_SETACL,
 	.p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
 	.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
-	.p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2,
+	.p_arglen = ACL3_setaclargs_sz,
+	.p_replen = ACL3_setaclres_sz,
 	.p_timer = 0,
 	.p_name = "SETACL",
 },
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d9000ec52f72..d6a30e965787 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2647,8 +2647,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
 	nfs_inode_return_delegation(inode);
 	buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
 	ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
-	if (ret == 0)
-		nfs4_write_cached_acl(inode, buf, buflen);
+	nfs_zap_caches(inode);
 	return ret;
 }
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f02d522fd788..b8c28f2380a5 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4546,16 +4546,13 @@ nfs4_stat_to_errno(int stat)
 	return stat;
 }
 
-#ifndef MAX
-# define MAX(a, b) (((a) > (b))? (a) : (b))
-#endif
-
 #define PROC(proc, argtype, restype) \
 [NFSPROC4_CLNT_##proc] = { \
 	.p_proc = NFSPROC4_COMPOUND, \
 	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
 	.p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
-	.p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \
+	.p_arglen = NFS4_##argtype##_sz, \
+	.p_replen = NFS4_##restype##_sz, \
 	.p_statidx = NFSPROC4_CLNT_##proc, \
 	.p_name = #proc, \
 }
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 75f819dc0255..49d1008ce1d7 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -428,7 +428,7 @@ static int __init root_nfs_getport(int program, int version, int proto)
 	printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n",
 		program, version, NIPQUAD(servaddr));
 	set_sockaddr(&sin, servaddr, 0);
-	return rpc_getport_external(&sin, program, version, proto);
+	return rpcb_getport_external(&sin, program, version, proto);
 }
 
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ca4b1d4ff42b..388950118f59 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -17,7 +17,8 @@
 #include <linux/nfs_page.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_mount.h>
-#include <linux/writeback.h>
+
+#include "internal.h"
 
 #define NFS_PARANOIA 1
 
@@ -50,9 +51,7 @@
  * @count: number of bytes to read/write
  *
  * The page must be locked by the caller. This makes sure we never
- * create two different requests for the same page, and avoids
- * a possible deadlock when we reach the hard limit on the number
- * of dirty pages.
+ * create two different requests for the same page.
  * User should ensure it is safe to sleep in this function.
  */
 struct nfs_page *
@@ -63,16 +62,12 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_page	*req;
 
-	/* Deal with hard limits. */
 	for (;;) {
 		/* try to allocate the request struct */
 		req = nfs_page_alloc();
 		if (req != NULL)
 			break;
 
-		/* Try to free up at least one request in order to stay
-		 * below the hard limit
-		 */
 		if (signalled() && (server->flags & NFS_MOUNT_INTR))
 			return ERR_PTR(-ERESTARTSYS);
 		yield();
@@ -223,124 +218,151 @@ out:
 }
 
 /**
- * nfs_coalesce_requests - Split coalesced requests out from a list.
- * @head: source list
- * @dst: destination list
- * @nmax: maximum number of requests to coalesce
- *
- * Moves a maximum of 'nmax' elements from one list to another.
- * The elements are checked to ensure that they form a contiguous set
- * of pages, and that the RPC credentials are the same.
+ * nfs_pageio_init - initialise a page io descriptor
+ * @desc: pointer to descriptor
+ * @inode: pointer to inode
+ * @doio: pointer to io function
+ * @bsize: io block size
+ * @io_flags: extra parameters for the io function
  */
-int
-nfs_coalesce_requests(struct list_head *head, struct list_head *dst,
-		unsigned int nmax)
+void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+		     struct inode *inode,
+		     int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
+		     size_t bsize,
+		     int io_flags)
 {
-	struct nfs_page *req = NULL;
-	unsigned int npages = 0;
-
-	while (!list_empty(head)) {
-		struct nfs_page *prev = req;
-
-		req = nfs_list_entry(head->next);
-		if (prev) {
-			if (req->wb_context->cred != prev->wb_context->cred)
-				break;
-			if (req->wb_context->lockowner != prev->wb_context->lockowner)
-				break;
-			if (req->wb_context->state != prev->wb_context->state)
-				break;
-			if (req->wb_index != (prev->wb_index + 1))
-				break;
-
-			if (req->wb_pgbase != 0)
-				break;
-		}
-		nfs_list_remove_request(req);
-		nfs_list_add_request(req, dst);
-		npages++;
-		if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE)
-			break;
-		if (npages >= nmax)
-			break;
-	}
-	return npages;
+	INIT_LIST_HEAD(&desc->pg_list);
+	desc->pg_bytes_written = 0;
+	desc->pg_count = 0;
+	desc->pg_bsize = bsize;
+	desc->pg_base = 0;
+	desc->pg_inode = inode;
+	desc->pg_doio = doio;
+	desc->pg_ioflags = io_flags;
+	desc->pg_error = 0;
 }
 
-#define NFS_SCAN_MAXENTRIES 16
 /**
- * nfs_scan_dirty - Scan the radix tree for dirty requests
- * @mapping: pointer to address space
- * @wbc: writeback_control structure
- * @dst: Destination list
+ * nfs_can_coalesce_requests - test two requests for compatibility
+ * @prev: pointer to nfs_page
+ * @req: pointer to nfs_page
  *
- * Moves elements from one of the inode request lists.
- * If the number of requests is set to 0, the entire address_space
- * starting at index idx_start, is scanned.
- * The requests are *not* checked to ensure that they form a contiguous set.
- * You must be holding the inode's req_lock when calling this function
+ * The nfs_page structures 'prev' and 'req' are compared to ensure that the
+ * page data area they describe is contiguous, and that their RPC
+ * credentials, NFSv4 open state, and lockowners are the same.
+ *
+ * Return 'true' if this is the case, else return 'false'.
  */
-long nfs_scan_dirty(struct address_space *mapping,
-		struct writeback_control *wbc,
-		struct list_head *dst)
+static int nfs_can_coalesce_requests(struct nfs_page *prev,
+				     struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(mapping->host);
-	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
-	struct nfs_page *req;
-	pgoff_t idx_start, idx_end;
-	long res = 0;
-	int found, i;
-
-	if (nfsi->ndirty == 0)
+	if (req->wb_context->cred != prev->wb_context->cred)
 		return 0;
-	if (wbc->range_cyclic) {
-		idx_start = 0;
-		idx_end = ULONG_MAX;
-	} else if (wbc->range_end == 0) {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = ULONG_MAX;
-	} else {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-	}
+	if (req->wb_context->lockowner != prev->wb_context->lockowner)
+		return 0;
+	if (req->wb_context->state != prev->wb_context->state)
+		return 0;
+	if (req->wb_index != (prev->wb_index + 1))
+		return 0;
+	if (req->wb_pgbase != 0)
+		return 0;
+	if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+		return 0;
+	return 1;
+}
 
-	for (;;) {
-		unsigned int toscan = NFS_SCAN_MAXENTRIES;
+/**
+ * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
+ * @desc: destination io descriptor
+ * @req: request
+ *
+ * Returns true if the request 'req' was successfully coalesced into the
+ * existing list of pages 'desc'.
+ */
+static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
+				     struct nfs_page *req)
+{
+	size_t newlen = req->wb_bytes;
 
-		found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree,
-				(void **)&pgvec[0], idx_start, toscan,
-				NFS_PAGE_TAG_DIRTY);
+	if (desc->pg_count != 0) {
+		struct nfs_page *prev;
 
-		/* Did we make progress? */
-		if (found <= 0)
-			break;
+		/*
+		 * FIXME: ideally we should be able to coalesce all requests
+		 * that are not block boundary aligned, but currently this
+		 * is problematic for the case of bsize < PAGE_CACHE_SIZE,
+		 * since nfs_flush_multi and nfs_pagein_multi assume you
+		 * can have only one struct nfs_page.
+		 */
+		if (desc->pg_bsize < PAGE_SIZE)
+			return 0;
+		newlen += desc->pg_count;
+		if (newlen > desc->pg_bsize)
+			return 0;
+		prev = nfs_list_entry(desc->pg_list.prev);
+		if (!nfs_can_coalesce_requests(prev, req))
+			return 0;
+	} else
+		desc->pg_base = req->wb_pgbase;
+	nfs_list_remove_request(req);
+	nfs_list_add_request(req, &desc->pg_list);
+	desc->pg_count = newlen;
+	return 1;
+}
 
-		for (i = 0; i < found; i++) {
-			req = pgvec[i];
-			if (!wbc->range_cyclic && req->wb_index > idx_end)
-				goto out;
+/*
+ * Helper for nfs_pageio_add_request and nfs_pageio_complete
+ */
+static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
+{
+	if (!list_empty(&desc->pg_list)) {
+		int error = desc->pg_doio(desc->pg_inode,
+					  &desc->pg_list,
+					  nfs_page_array_len(desc->pg_base,
+							     desc->pg_count),
+					  desc->pg_count,
+					  desc->pg_ioflags);
+		if (error < 0)
+			desc->pg_error = error;
+		else
+			desc->pg_bytes_written += desc->pg_count;
+	}
+	if (list_empty(&desc->pg_list)) {
+		desc->pg_count = 0;
+		desc->pg_base = 0;
+	}
+}
 
-		/* Try to lock request and mark it for writeback */
-		if (!nfs_set_page_writeback_locked(req))
-			goto next;
-		radix_tree_tag_clear(&nfsi->nfs_page_tree,
-				req->wb_index, NFS_PAGE_TAG_DIRTY);
-		nfsi->ndirty--;
-		nfs_list_remove_request(req);
-		nfs_list_add_request(req, dst);
-		res++;
-		if (res == LONG_MAX)
-			goto out;
-next:
-		idx_start = req->wb_index + 1;
-	}
+/**
+ * nfs_pageio_add_request - Attempt to coalesce a request into a page list.
+ * @desc: destination io descriptor
+ * @req: request
+ *
+ * Returns true if the request 'req' was successfully coalesced into the
+ * existing list of pages 'desc'.
+ */
+int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+			   struct nfs_page *req)
+{
+	while (!nfs_pageio_do_add_request(desc, req)) {
+		nfs_pageio_doio(desc);
+		if (desc->pg_error < 0)
+			return 0;
 	}
-out:
-	WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty));
-	return res;
+	return 1;
 }
 
 /**
+ * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
+ * @desc: pointer to io descriptor
+ */
+void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
+{
+	nfs_pageio_doio(desc);
+}
+
+#define NFS_SCAN_MAXENTRIES 16
+/**
  * nfs_scan_list - Scan a list for matching requests
  * @nfsi: NFS inode
  * @head: One of the NFS inode request lists
@@ -355,12 +377,12 @@ out:
  * You must be holding the inode's req_lock when calling this function
  */
 int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head,
-		struct list_head *dst, unsigned long idx_start,
+		struct list_head *dst, pgoff_t idx_start,
 		unsigned int npages)
 {
 	struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES];
 	struct nfs_page *req;
-	unsigned long idx_end;
+	pgoff_t idx_end;
 	int found, i;
 	int res;
 
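
The new nfs_pageio_descriptor replaces nfs_coalesce_requests()/nfs_scan_dirty(): callers feed requests one at a time and the descriptor flushes a batch through its doio callback whenever the block size would be exceeded. A toy userspace model of that flow, with field names mirroring the kernel code but everything else (stdio, byte counters instead of struct nfs_page lists) purely illustrative:

/* Toy model of the pageio flow: coalesce until pg_bsize would be
 * exceeded, then flush one batch through the doio callback. */
#include <stdio.h>
#include <stddef.h>

struct toy_desc {
	size_t pg_count;		/* bytes queued in the current batch */
	size_t pg_bsize;		/* flush threshold (rsize/wsize) */
	size_t pg_bytes_written;
	int    pg_nreqs;		/* number of queued toy requests */
	int (*pg_doio)(struct toy_desc *);
};

static int toy_doio(struct toy_desc *d)
{
	if (d->pg_nreqs == 0)
		return 0;
	printf("doio: %d request(s), %zu bytes in one RPC\n",
	       d->pg_nreqs, d->pg_count);
	d->pg_bytes_written += d->pg_count;
	d->pg_count = 0;
	d->pg_nreqs = 0;
	return 0;
}

static void toy_pageio_init(struct toy_desc *d, size_t bsize,
			    int (*doio)(struct toy_desc *))
{
	d->pg_count = d->pg_bytes_written = 0;
	d->pg_nreqs = 0;
	d->pg_bsize = bsize;
	d->pg_doio = doio;
}

/* Mirrors nfs_pageio_add_request(): if the request does not fit,
 * flush what is already queued, then queue it. */
static void toy_pageio_add(struct toy_desc *d, size_t len)
{
	while (d->pg_count != 0 && d->pg_count + len > d->pg_bsize)
		d->pg_doio(d);
	d->pg_count += len;
	d->pg_nreqs++;
}

static void toy_pageio_complete(struct toy_desc *d)
{
	d->pg_doio(d);	/* flush the remainder, as nfs_pageio_complete() does */
}

int main(void)
{
	struct toy_desc d;

	toy_pageio_init(&d, 32768, toy_doio);	/* e.g. wsize = 32k */
	for (int i = 0; i < 10; i++)
		toy_pageio_add(&d, 4096);	/* ten 4k pages */
	toy_pageio_complete(&d);
	printf("total written: %zu bytes\n", d.pg_bytes_written);
	return 0;
}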
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6ab4d5a9edf2..9a55807b2a70 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -27,7 +27,8 @@
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
-static int nfs_pagein_one(struct list_head *, struct inode *);
+static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
+static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
 static const struct rpc_call_ops nfs_read_partial_ops;
 static const struct rpc_call_ops nfs_read_full_ops;
 
@@ -36,9 +37,8 @@ static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ	(32)
 
-struct nfs_read_data *nfs_readdata_alloc(size_t len)
+struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
 {
-	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS);
 
 	if (p) {
@@ -133,7 +133,10 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
 
 	nfs_list_add_request(new, &one_request);
-	nfs_pagein_one(&one_request, inode);
+	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
+		nfs_pagein_multi(inode, &one_request, 1, len, 0);
+	else
+		nfs_pagein_one(inode, &one_request, 1, len, 0);
 	return 0;
 }
 
@@ -230,7 +233,7 @@ static void nfs_execute_read(struct nfs_read_data *data)
  * won't see the new data until our attribute cache is updated. This is more
  * or less conventional NFS client behavior.
  */
-static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
+static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
 {
 	struct nfs_page *req = nfs_list_entry(head->next);
 	struct page *page = req->wb_page;
@@ -242,11 +245,11 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
 
 	nfs_list_remove_request(req);
 
-	nbytes = req->wb_bytes;
+	nbytes = count;
 	do {
 		size_t len = min(nbytes,rsize);
 
-		data = nfs_readdata_alloc(len);
+		data = nfs_readdata_alloc(1);
 		if (!data)
 			goto out_bad;
 		INIT_LIST_HEAD(&data->pages);
@@ -258,23 +261,19 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
 
 	ClearPageError(page);
 	offset = 0;
-	nbytes = req->wb_bytes;
+	nbytes = count;
 	do {
 		data = list_entry(list.next, struct nfs_read_data, pages);
 		list_del_init(&data->pages);
 
 		data->pagevec[0] = page;
 
-		if (nbytes > rsize) {
-			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
-					rsize, offset);
-			offset += rsize;
-			nbytes -= rsize;
-		} else {
-			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
-					nbytes, offset);
-			nbytes = 0;
-		}
+		if (nbytes < rsize)
+			rsize = nbytes;
+		nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
+				rsize, offset);
+		offset += rsize;
+		nbytes -= rsize;
 		nfs_execute_read(data);
 	} while (nbytes != 0);
 
@@ -291,30 +290,24 @@ out_bad:
 	return -ENOMEM;
 }
 
-static int nfs_pagein_one(struct list_head *head, struct inode *inode)
+static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
 {
 	struct nfs_page *req;
 	struct page **pages;
 	struct nfs_read_data *data;
-	unsigned int count;
 
-	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
-		return nfs_pagein_multi(head, inode);
-
-	data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
+	data = nfs_readdata_alloc(npages);
 	if (!data)
 		goto out_bad;
 
 	INIT_LIST_HEAD(&data->pages);
 	pages = data->pagevec;
-	count = 0;
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, &data->pages);
 		ClearPageError(req->wb_page);
 		*pages++ = req->wb_page;
-		count += req->wb_bytes;
 	}
 	req = nfs_list_entry(data->pages.next);
 
@@ -327,28 +320,6 @@ out_bad:
 	return -ENOMEM;
 }
 
-static int
-nfs_pagein_list(struct list_head *head, int rpages)
-{
-	LIST_HEAD(one_request);
-	struct nfs_page *req;
-	int error = 0;
-	unsigned int pages = 0;
-
-	while (!list_empty(head)) {
-		pages += nfs_coalesce_requests(head, &one_request, rpages);
-		req = nfs_list_entry(one_request.next);
-		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
-		if (error < 0)
-			break;
-	}
-	if (error >= 0)
-		return pages;
-
-	nfs_async_read_error(head);
-	return error;
-}
-
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
@@ -538,7 +509,7 @@ out_error:
 }
 
 struct nfs_readdesc {
-	struct list_head *head;
+	struct nfs_pageio_descriptor *pgio;
 	struct nfs_open_context *ctx;
 };
 
@@ -562,19 +533,21 @@ readpage_async_filler(void *data, struct page *page)
 	}
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
-	nfs_list_add_request(new, desc->head);
+	nfs_pageio_add_request(desc->pgio, new);
 	return 0;
 }
 
 int nfs_readpages(struct file *filp, struct address_space *mapping,
 		struct list_head *pages, unsigned nr_pages)
 {
-	LIST_HEAD(head);
+	struct nfs_pageio_descriptor pgio;
 	struct nfs_readdesc desc = {
-		.head = &head,
+		.pgio = &pgio,
 	};
 	struct inode *inode = mapping->host;
 	struct nfs_server *server = NFS_SERVER(inode);
+	size_t rsize = server->rsize;
+	unsigned long npages;
 	int ret = -ESTALE;
 
 	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
@@ -593,13 +566,16 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 	} else
 		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
 				filp->private_data);
+	if (rsize < PAGE_CACHE_SIZE)
+		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
+	else
+		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);
+
 	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
-	if (!list_empty(&head)) {
-		int err = nfs_pagein_list(&head, server->rpages);
-		if (!ret)
-			nfs_add_stats(inode, NFSIOS_READPAGES, err);
-		ret = err;
-	}
+
+	nfs_pageio_complete(&pgio);
+	npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	nfs_add_stats(inode, NFSIOS_READPAGES, npages);
 	put_nfs_open_context(desc.ctx);
 out:
 	return ret;
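
For the rsize < PAGE_CACHE_SIZE case, nfs_pagein_multi() now clamps rsize on the final iteration instead of carrying a separate else branch. A standalone sketch of the resulting RPC sizing, using example values (4096-byte page, rsize 1024) rather than anything mandated by the patch:

/* Sketch of the rewritten sizing loop in nfs_pagein_multi(); userspace
 * arithmetic only, with example values. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count = 4096;	/* one page worth of data */
	size_t rsize = 1024;	/* server read size */
	size_t nbytes, offset = 0;

	for (nbytes = count; nbytes != 0; ) {
		if (nbytes < rsize)
			rsize = nbytes;		/* same clamp as the new code */
		printf("RPC: offset=%zu len=%zu (one nfs_readdata_alloc(1))\n",
		       offset, rsize);
		offset += rsize;
		nbytes -= rsize;
	}
	return 0;
}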
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1eae44b9a1a..ca20d3cc2609 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -204,9 +204,9 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	lock_kernel();
 
 	error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
-	buf->f_type = NFS_SUPER_MAGIC;
 	if (error < 0)
 		goto out_err;
+	buf->f_type = NFS_SUPER_MAGIC;
 
 	/*
 	 * Current versions of glibc do not correctly handle the
@@ -233,15 +233,14 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_ffree = res.afiles;
 
 	buf->f_namelen = server->namelen;
-out:
+
 	unlock_kernel();
 	return 0;
 
 out_err:
 	dprintk("%s: statfs error = %d\n", __FUNCTION__, -error);
-	buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1;
-	goto out;
-
+	unlock_kernel();
+	return error;
 }
 
 /*
@@ -291,6 +290,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", "" },
 		{ NFS_MOUNT_NOACL, ",noacl", "" },
+		{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
 		{ 0, NULL, NULL }
 	};
 	const struct proc_nfs_info *nfs_infop;
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
index f4a0548b9ce8..bc2821331c29 100644
--- a/fs/nfs/symlink.c
+++ b/fs/nfs/symlink.c
@@ -61,15 +61,9 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 		err = page;
 		goto read_failed;
 	}
-	if (!PageUptodate(page)) {
-		err = ERR_PTR(-EIO);
-		goto getlink_read_error;
-	}
 	nd_set_link(nd, kmap(page));
 	return page;
 
-getlink_read_error:
-	page_cache_release(page);
 read_failed:
 	nd_set_link(nd, err);
 	return NULL;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index ad2e91b4904f..5d44b8bd1070 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -38,8 +38,8 @@ | |||
38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, | 38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, |
39 | struct page *, | 39 | struct page *, |
40 | unsigned int, unsigned int); | 40 | unsigned int, unsigned int); |
41 | static void nfs_mark_request_dirty(struct nfs_page *req); | 41 | static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc, |
42 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); | 42 | struct inode *inode, int ioflags); |
43 | static const struct rpc_call_ops nfs_write_partial_ops; | 43 | static const struct rpc_call_ops nfs_write_partial_ops; |
44 | static const struct rpc_call_ops nfs_write_full_ops; | 44 | static const struct rpc_call_ops nfs_write_full_ops; |
45 | static const struct rpc_call_ops nfs_commit_ops; | 45 | static const struct rpc_call_ops nfs_commit_ops; |
@@ -72,9 +72,8 @@ void nfs_commit_free(struct nfs_write_data *wdata) | |||
72 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); | 72 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); |
73 | } | 73 | } |
74 | 74 | ||
75 | struct nfs_write_data *nfs_writedata_alloc(size_t len) | 75 | struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) |
76 | { | 76 | { |
77 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
78 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); | 77 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); |
79 | 78 | ||
80 | if (p) { | 79 | if (p) { |
@@ -140,7 +139,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c | |||
140 | { | 139 | { |
141 | struct inode *inode = page->mapping->host; | 140 | struct inode *inode = page->mapping->host; |
142 | loff_t end, i_size = i_size_read(inode); | 141 | loff_t end, i_size = i_size_read(inode); |
143 | unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; | 142 | pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; |
144 | 143 | ||
145 | if (i_size > 0 && page->index < end_index) | 144 | if (i_size > 0 && page->index < end_index) |
146 | return; | 145 | return; |
@@ -202,7 +201,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
202 | static int wb_priority(struct writeback_control *wbc) | 201 | static int wb_priority(struct writeback_control *wbc) |
203 | { | 202 | { |
204 | if (wbc->for_reclaim) | 203 | if (wbc->for_reclaim) |
205 | return FLUSH_HIGHPRI; | 204 | return FLUSH_HIGHPRI | FLUSH_STABLE; |
206 | if (wbc->for_kupdate) | 205 | if (wbc->for_kupdate) |
207 | return FLUSH_LOWPRI; | 206 | return FLUSH_LOWPRI; |
208 | return 0; | 207 | return 0; |
@@ -252,10 +251,12 @@ static void nfs_end_page_writeback(struct page *page) | |||
252 | * was not tagged. | 251 | * was not tagged. |
253 | * May also return an error if the user signalled nfs_wait_on_request(). | 252 | * May also return an error if the user signalled nfs_wait_on_request(). |
254 | */ | 253 | */ |
255 | static int nfs_page_mark_flush(struct page *page) | 254 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, |
255 | struct page *page) | ||
256 | { | 256 | { |
257 | struct nfs_page *req; | 257 | struct nfs_page *req; |
258 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | 258 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); |
259 | spinlock_t *req_lock = &nfsi->req_lock; | ||
259 | int ret; | 260 | int ret; |
260 | 261 | ||
261 | spin_lock(req_lock); | 262 | spin_lock(req_lock); |
@@ -273,19 +274,30 @@ static int nfs_page_mark_flush(struct page *page) | |||
273 | * request as dirty (in which case we don't care). | 274 | * request as dirty (in which case we don't care). |
274 | */ | 275 | */ |
275 | spin_unlock(req_lock); | 276 | spin_unlock(req_lock); |
277 | /* Prevent deadlock! */ | ||
278 | nfs_pageio_complete(pgio); | ||
276 | ret = nfs_wait_on_request(req); | 279 | ret = nfs_wait_on_request(req); |
277 | nfs_release_request(req); | 280 | nfs_release_request(req); |
278 | if (ret != 0) | 281 | if (ret != 0) |
279 | return ret; | 282 | return ret; |
280 | spin_lock(req_lock); | 283 | spin_lock(req_lock); |
281 | } | 284 | } |
282 | spin_unlock(req_lock); | 285 | if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { |
283 | if (nfs_set_page_writeback(page) == 0) { | 286 | /* This request is marked for commit */ |
284 | nfs_list_remove_request(req); | 287 | spin_unlock(req_lock); |
285 | nfs_mark_request_dirty(req); | 288 | nfs_unlock_request(req); |
289 | nfs_pageio_complete(pgio); | ||
290 | return 1; | ||
291 | } | ||
292 | if (nfs_set_page_writeback(page) != 0) { | ||
293 | spin_unlock(req_lock); | ||
294 | BUG(); | ||
286 | } | 295 | } |
296 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, | ||
297 | NFS_PAGE_TAG_WRITEBACK); | ||
287 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); | 298 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); |
288 | nfs_unlock_request(req); | 299 | spin_unlock(req_lock); |
300 | nfs_pageio_add_request(pgio, req); | ||
289 | return ret; | 301 | return ret; |
290 | } | 302 | } |
291 | 303 | ||
@@ -294,6 +306,7 @@ static int nfs_page_mark_flush(struct page *page) | |||
294 | */ | 306 | */ |
295 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) | 307 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) |
296 | { | 308 | { |
309 | struct nfs_pageio_descriptor mypgio, *pgio; | ||
297 | struct nfs_open_context *ctx; | 310 | struct nfs_open_context *ctx; |
298 | struct inode *inode = page->mapping->host; | 311 | struct inode *inode = page->mapping->host; |
299 | unsigned offset; | 312 | unsigned offset; |
@@ -302,7 +315,14 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc | |||
302 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); | 315 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
303 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | 316 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
304 | 317 | ||
305 | err = nfs_page_mark_flush(page); | 318 | if (wbc->for_writepages) |
319 | pgio = wbc->fs_private; | ||
320 | else { | ||
321 | nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc)); | ||
322 | pgio = &mypgio; | ||
323 | } | ||
324 | |||
325 | err = nfs_page_async_flush(pgio, page); | ||
306 | if (err <= 0) | 326 | if (err <= 0) |
307 | goto out; | 327 | goto out; |
308 | err = 0; | 328 | err = 0; |
@@ -319,12 +339,12 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc | |||
319 | put_nfs_open_context(ctx); | 339 | put_nfs_open_context(ctx); |
320 | if (err != 0) | 340 | if (err != 0) |
321 | goto out; | 341 | goto out; |
322 | err = nfs_page_mark_flush(page); | 342 | err = nfs_page_async_flush(pgio, page); |
323 | if (err > 0) | 343 | if (err > 0) |
324 | err = 0; | 344 | err = 0; |
325 | out: | 345 | out: |
326 | if (!wbc->for_writepages) | 346 | if (!wbc->for_writepages) |
327 | nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); | 347 | nfs_pageio_complete(pgio); |
328 | return err; | 348 | return err; |
329 | } | 349 | } |
330 | 350 | ||
@@ -340,20 +360,20 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) | |||
340 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | 360 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) |
341 | { | 361 | { |
342 | struct inode *inode = mapping->host; | 362 | struct inode *inode = mapping->host; |
363 | struct nfs_pageio_descriptor pgio; | ||
343 | int err; | 364 | int err; |
344 | 365 | ||
345 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); | 366 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
346 | 367 | ||
368 | nfs_pageio_init_write(&pgio, inode, wb_priority(wbc)); | ||
369 | wbc->fs_private = &pgio; | ||
347 | err = generic_writepages(mapping, wbc); | 370 | err = generic_writepages(mapping, wbc); |
371 | nfs_pageio_complete(&pgio); | ||
348 | if (err) | 372 | if (err) |
349 | return err; | 373 | return err; |
350 | err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); | 374 | if (pgio.pg_error) |
351 | if (err < 0) | 375 | return pgio.pg_error; |
352 | goto out; | 376 | return 0; |
353 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); | ||
354 | err = 0; | ||
355 | out: | ||
356 | return err; | ||
357 | } | 377 | } |
358 | 378 | ||
359 | /* | 379 | /* |
@@ -376,6 +396,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req) | |||
376 | } | 396 | } |
377 | SetPagePrivate(req->wb_page); | 397 | SetPagePrivate(req->wb_page); |
378 | set_page_private(req->wb_page, (unsigned long)req); | 398 | set_page_private(req->wb_page, (unsigned long)req); |
399 | if (PageDirty(req->wb_page)) | ||
400 | set_bit(PG_NEED_FLUSH, &req->wb_flags); | ||
379 | nfsi->npages++; | 401 | nfsi->npages++; |
380 | atomic_inc(&req->wb_count); | 402 | atomic_inc(&req->wb_count); |
381 | return 0; | 403 | return 0; |
@@ -395,6 +417,8 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
395 | set_page_private(req->wb_page, 0); | 417 | set_page_private(req->wb_page, 0); |
396 | ClearPagePrivate(req->wb_page); | 418 | ClearPagePrivate(req->wb_page); |
397 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); | 419 | radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); |
420 | if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags)) | ||
421 | __set_page_dirty_nobuffers(req->wb_page); | ||
398 | nfsi->npages--; | 422 | nfsi->npages--; |
399 | if (!nfsi->npages) { | 423 | if (!nfsi->npages) { |
400 | spin_unlock(&nfsi->req_lock); | 424 | spin_unlock(&nfsi->req_lock); |
@@ -406,24 +430,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) | |||
406 | nfs_release_request(req); | 430 | nfs_release_request(req); |
407 | } | 431 | } |
408 | 432 | ||
409 | /* | ||
410 | * Add a request to the inode's dirty list. | ||
411 | */ | ||
412 | static void | ||
413 | nfs_mark_request_dirty(struct nfs_page *req) | ||
414 | { | ||
415 | struct inode *inode = req->wb_context->dentry->d_inode; | ||
416 | struct nfs_inode *nfsi = NFS_I(inode); | ||
417 | |||
418 | spin_lock(&nfsi->req_lock); | ||
419 | radix_tree_tag_set(&nfsi->nfs_page_tree, | ||
420 | req->wb_index, NFS_PAGE_TAG_DIRTY); | ||
421 | nfs_list_add_request(req, &nfsi->dirty); | ||
422 | nfsi->ndirty++; | ||
423 | spin_unlock(&nfsi->req_lock); | ||
424 | __mark_inode_dirty(inode, I_DIRTY_PAGES); | ||
425 | } | ||
426 | |||
427 | static void | 433 | static void |
428 | nfs_redirty_request(struct nfs_page *req) | 434 | nfs_redirty_request(struct nfs_page *req) |
429 | { | 435 | { |
@@ -438,7 +444,7 @@ nfs_dirty_request(struct nfs_page *req) | |||
438 | { | 444 | { |
439 | struct page *page = req->wb_page; | 445 | struct page *page = req->wb_page; |
440 | 446 | ||
441 | if (page == NULL) | 447 | if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags)) |
442 | return 0; | 448 | return 0; |
443 | return !PageWriteback(req->wb_page); | 449 | return !PageWriteback(req->wb_page); |
444 | } | 450 | } |
@@ -456,10 +462,48 @@ nfs_mark_request_commit(struct nfs_page *req) | |||
456 | spin_lock(&nfsi->req_lock); | 462 | spin_lock(&nfsi->req_lock); |
457 | nfs_list_add_request(req, &nfsi->commit); | 463 | nfs_list_add_request(req, &nfsi->commit); |
458 | nfsi->ncommit++; | 464 | nfsi->ncommit++; |
465 | set_bit(PG_NEED_COMMIT, &(req)->wb_flags); | ||
459 | spin_unlock(&nfsi->req_lock); | 466 | spin_unlock(&nfsi->req_lock); |
460 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 467 | inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
461 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); | 468 | __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
462 | } | 469 | } |
470 | |||
471 | static inline | ||
472 | int nfs_write_need_commit(struct nfs_write_data *data) | ||
473 | { | ||
474 | return data->verf.committed != NFS_FILE_SYNC; | ||
475 | } | ||
476 | |||
477 | static inline | ||
478 | int nfs_reschedule_unstable_write(struct nfs_page *req) | ||
479 | { | ||
480 | if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) { | ||
481 | nfs_mark_request_commit(req); | ||
482 | return 1; | ||
483 | } | ||
484 | if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) { | ||
485 | nfs_redirty_request(req); | ||
486 | return 1; | ||
487 | } | ||
488 | return 0; | ||
489 | } | ||
490 | #else | ||
491 | static inline void | ||
492 | nfs_mark_request_commit(struct nfs_page *req) | ||
493 | { | ||
494 | } | ||
495 | |||
496 | static inline | ||
497 | int nfs_write_need_commit(struct nfs_write_data *data) | ||
498 | { | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | static inline | ||
503 | int nfs_reschedule_unstable_write(struct nfs_page *req) | ||
504 | { | ||
505 | return 0; | ||
506 | } | ||
463 | #endif | 507 | #endif |
464 | 508 | ||
465 | /* | 509 | /* |
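The helpers added above centralize what happens to a request whose WRITE reply came back unstable: a request flagged PG_NEED_COMMIT is queued for a COMMIT, one flagged PG_NEED_RESCHED is redirtied and resent, and anything else is simply finished. Below is a minimal userspace sketch of that priority order; the struct and flag constants are stand-ins for illustration only, not the kernel types.

        #include <stdio.h>

        enum { PG_NEED_COMMIT = 1 << 0, PG_NEED_RESCHED = 1 << 1 };

        struct fake_req {
                unsigned int flags;             /* stands in for req->wb_flags */
        };

        /* Mirrors the intent of nfs_reschedule_unstable_write(): returns nonzero
         * if the request was requeued (for a COMMIT or for a fresh WRITE). */
        static int reschedule_unstable_write(struct fake_req *req)
        {
                if (req->flags & PG_NEED_COMMIT) {
                        printf("queue on the commit list\n");
                        return 1;
                }
                if (req->flags & PG_NEED_RESCHED) {
                        req->flags &= ~PG_NEED_RESCHED;
                        printf("redirty the page and resend the WRITE\n");
                        return 1;
                }
                return 0;       /* data is stable: the request can be removed */
        }

        int main(void)
        {
                struct fake_req commit = { PG_NEED_COMMIT };
                struct fake_req resched = { PG_NEED_RESCHED };
                struct fake_req done = { 0 };

                reschedule_unstable_write(&commit);
                reschedule_unstable_write(&resched);
                if (!reschedule_unstable_write(&done))
                        printf("request complete\n");
                return 0;
        }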
@@ -467,11 +511,11 @@ nfs_mark_request_commit(struct nfs_page *req) | |||
467 | * | 511 | * |
468 | * Interruptible by signals only if mounted with intr flag. | 512 | * Interruptible by signals only if mounted with intr flag. |
469 | */ | 513 | */ |
470 | static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages) | 514 | static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) |
471 | { | 515 | { |
472 | struct nfs_inode *nfsi = NFS_I(inode); | 516 | struct nfs_inode *nfsi = NFS_I(inode); |
473 | struct nfs_page *req; | 517 | struct nfs_page *req; |
474 | unsigned long idx_end, next; | 518 | pgoff_t idx_end, next; |
475 | unsigned int res = 0; | 519 | unsigned int res = 0; |
476 | int error; | 520 | int error; |
477 | 521 | ||
@@ -500,18 +544,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_st | |||
500 | return res; | 544 | return res; |
501 | } | 545 | } |
502 | 546 | ||
503 | static void nfs_cancel_dirty_list(struct list_head *head) | ||
504 | { | ||
505 | struct nfs_page *req; | ||
506 | while(!list_empty(head)) { | ||
507 | req = nfs_list_entry(head->next); | ||
508 | nfs_list_remove_request(req); | ||
509 | nfs_end_page_writeback(req->wb_page); | ||
510 | nfs_inode_remove_request(req); | ||
511 | nfs_clear_page_writeback(req); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | static void nfs_cancel_commit_list(struct list_head *head) | 547 | static void nfs_cancel_commit_list(struct list_head *head) |
516 | { | 548 | { |
517 | struct nfs_page *req; | 549 | struct nfs_page *req; |
@@ -520,6 +552,7 @@ static void nfs_cancel_commit_list(struct list_head *head) | |||
520 | req = nfs_list_entry(head->next); | 552 | req = nfs_list_entry(head->next); |
521 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 553 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
522 | nfs_list_remove_request(req); | 554 | nfs_list_remove_request(req); |
555 | clear_bit(PG_NEED_COMMIT, &(req)->wb_flags); | ||
523 | nfs_inode_remove_request(req); | 556 | nfs_inode_remove_request(req); |
524 | nfs_unlock_request(req); | 557 | nfs_unlock_request(req); |
525 | } | 558 | } |
@@ -537,7 +570,7 @@ static void nfs_cancel_commit_list(struct list_head *head) | |||
537 | * The requests are *not* checked to ensure that they form a contiguous set. | 570 | * The requests are *not* checked to ensure that they form a contiguous set. |
538 | */ | 571 | */ |
539 | static int | 572 | static int |
540 | nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 573 | nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) |
541 | { | 574 | { |
542 | struct nfs_inode *nfsi = NFS_I(inode); | 575 | struct nfs_inode *nfsi = NFS_I(inode); |
543 | int res = 0; | 576 | int res = 0; |
@@ -551,40 +584,12 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st | |||
551 | return res; | 584 | return res; |
552 | } | 585 | } |
553 | #else | 586 | #else |
554 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 587 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) |
555 | { | 588 | { |
556 | return 0; | 589 | return 0; |
557 | } | 590 | } |
558 | #endif | 591 | #endif |
559 | 592 | ||
560 | static int nfs_wait_on_write_congestion(struct address_space *mapping) | ||
561 | { | ||
562 | struct inode *inode = mapping->host; | ||
563 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
564 | int ret = 0; | ||
565 | |||
566 | might_sleep(); | ||
567 | |||
568 | if (!bdi_write_congested(bdi)) | ||
569 | return 0; | ||
570 | |||
571 | nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT); | ||
572 | |||
573 | do { | ||
574 | struct rpc_clnt *clnt = NFS_CLIENT(inode); | ||
575 | sigset_t oldset; | ||
576 | |||
577 | rpc_clnt_sigmask(clnt, &oldset); | ||
578 | ret = congestion_wait_interruptible(WRITE, HZ/10); | ||
579 | rpc_clnt_sigunmask(clnt, &oldset); | ||
580 | if (ret == -ERESTARTSYS) | ||
581 | break; | ||
582 | ret = 0; | ||
583 | } while (bdi_write_congested(bdi)); | ||
584 | |||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | /* | 593 | /* |
589 | * Try to update any existing write request, or create one if there is none. | 594 | * Try to update any existing write request, or create one if there is none. |
590 | * In order to match, the request's credentials must match those of | 595 | * In order to match, the request's credentials must match those of |
@@ -599,12 +604,10 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
599 | struct inode *inode = mapping->host; | 604 | struct inode *inode = mapping->host; |
600 | struct nfs_inode *nfsi = NFS_I(inode); | 605 | struct nfs_inode *nfsi = NFS_I(inode); |
601 | struct nfs_page *req, *new = NULL; | 606 | struct nfs_page *req, *new = NULL; |
602 | unsigned long rqend, end; | 607 | pgoff_t rqend, end; |
603 | 608 | ||
604 | end = offset + bytes; | 609 | end = offset + bytes; |
605 | 610 | ||
606 | if (nfs_wait_on_write_congestion(mapping)) | ||
607 | return ERR_PTR(-ERESTARTSYS); | ||
608 | for (;;) { | 611 | for (;;) { |
609 | /* Loop over all inode entries and see if we find | 612 | /* Loop over all inode entries and see if we find |
610 | * A request for the page we wish to update | 613 | * A request for the page we wish to update |
@@ -746,26 +749,12 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
746 | 749 | ||
747 | static void nfs_writepage_release(struct nfs_page *req) | 750 | static void nfs_writepage_release(struct nfs_page *req) |
748 | { | 751 | { |
749 | nfs_end_page_writeback(req->wb_page); | ||
750 | |||
751 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | ||
752 | if (!PageError(req->wb_page)) { | ||
753 | if (NFS_NEED_RESCHED(req)) { | ||
754 | nfs_redirty_request(req); | ||
755 | goto out; | ||
756 | } else if (NFS_NEED_COMMIT(req)) { | ||
757 | nfs_mark_request_commit(req); | ||
758 | goto out; | ||
759 | } | ||
760 | } | ||
761 | nfs_inode_remove_request(req); | ||
762 | 752 | ||
763 | out: | 753 | if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) { |
764 | nfs_clear_commit(req); | 754 | nfs_end_page_writeback(req->wb_page); |
765 | nfs_clear_reschedule(req); | 755 | nfs_inode_remove_request(req); |
766 | #else | 756 | } else |
767 | nfs_inode_remove_request(req); | 757 | nfs_end_page_writeback(req->wb_page); |
768 | #endif | ||
769 | nfs_clear_page_writeback(req); | 758 | nfs_clear_page_writeback(req); |
770 | } | 759 | } |
771 | 760 | ||
@@ -842,7 +831,7 @@ static void nfs_execute_write(struct nfs_write_data *data) | |||
842 | * Generate multiple small requests to write out a single | 831 | * Generate multiple small requests to write out a single |
843 | * contiguous dirty area on one page. | 832 | * contiguous dirty area on one page. |
844 | */ | 833 | */ |
845 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | 834 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) |
846 | { | 835 | { |
847 | struct nfs_page *req = nfs_list_entry(head->next); | 836 | struct nfs_page *req = nfs_list_entry(head->next); |
848 | struct page *page = req->wb_page; | 837 | struct page *page = req->wb_page; |
@@ -854,11 +843,11 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | |||
854 | 843 | ||
855 | nfs_list_remove_request(req); | 844 | nfs_list_remove_request(req); |
856 | 845 | ||
857 | nbytes = req->wb_bytes; | 846 | nbytes = count; |
858 | do { | 847 | do { |
859 | size_t len = min(nbytes, wsize); | 848 | size_t len = min(nbytes, wsize); |
860 | 849 | ||
861 | data = nfs_writedata_alloc(len); | 850 | data = nfs_writedata_alloc(1); |
862 | if (!data) | 851 | if (!data) |
863 | goto out_bad; | 852 | goto out_bad; |
864 | list_add(&data->pages, &list); | 853 | list_add(&data->pages, &list); |
@@ -869,23 +858,19 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | |||
869 | 858 | ||
870 | ClearPageError(page); | 859 | ClearPageError(page); |
871 | offset = 0; | 860 | offset = 0; |
872 | nbytes = req->wb_bytes; | 861 | nbytes = count; |
873 | do { | 862 | do { |
874 | data = list_entry(list.next, struct nfs_write_data, pages); | 863 | data = list_entry(list.next, struct nfs_write_data, pages); |
875 | list_del_init(&data->pages); | 864 | list_del_init(&data->pages); |
876 | 865 | ||
877 | data->pagevec[0] = page; | 866 | data->pagevec[0] = page; |
878 | 867 | ||
879 | if (nbytes > wsize) { | 868 | if (nbytes < wsize) |
880 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | 869 | wsize = nbytes; |
881 | wsize, offset, how); | 870 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, |
882 | offset += wsize; | 871 | wsize, offset, how); |
883 | nbytes -= wsize; | 872 | offset += wsize; |
884 | } else { | 873 | nbytes -= wsize; |
885 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | ||
886 | nbytes, offset, how); | ||
887 | nbytes = 0; | ||
888 | } | ||
889 | nfs_execute_write(data); | 874 | nfs_execute_write(data); |
890 | } while (nbytes != 0); | 875 | } while (nbytes != 0); |
891 | 876 | ||
@@ -897,8 +882,8 @@ out_bad: | |||
897 | list_del(&data->pages); | 882 | list_del(&data->pages); |
898 | nfs_writedata_release(data); | 883 | nfs_writedata_release(data); |
899 | } | 884 | } |
900 | nfs_end_page_writeback(req->wb_page); | ||
901 | nfs_redirty_request(req); | 885 | nfs_redirty_request(req); |
886 | nfs_end_page_writeback(req->wb_page); | ||
902 | nfs_clear_page_writeback(req); | 887 | nfs_clear_page_writeback(req); |
903 | return -ENOMEM; | 888 | return -ENOMEM; |
904 | } | 889 | } |
@@ -911,26 +896,23 @@ out_bad: | |||
911 | * This is the case if nfs_updatepage detects a conflicting request | 896 | * This is the case if nfs_updatepage detects a conflicting request |
912 | * that has been written but not committed. | 897 | * that has been written but not committed. |
913 | */ | 898 | */ |
914 | static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | 899 | static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) |
915 | { | 900 | { |
916 | struct nfs_page *req; | 901 | struct nfs_page *req; |
917 | struct page **pages; | 902 | struct page **pages; |
918 | struct nfs_write_data *data; | 903 | struct nfs_write_data *data; |
919 | unsigned int count; | ||
920 | 904 | ||
921 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); | 905 | data = nfs_writedata_alloc(npages); |
922 | if (!data) | 906 | if (!data) |
923 | goto out_bad; | 907 | goto out_bad; |
924 | 908 | ||
925 | pages = data->pagevec; | 909 | pages = data->pagevec; |
926 | count = 0; | ||
927 | while (!list_empty(head)) { | 910 | while (!list_empty(head)) { |
928 | req = nfs_list_entry(head->next); | 911 | req = nfs_list_entry(head->next); |
929 | nfs_list_remove_request(req); | 912 | nfs_list_remove_request(req); |
930 | nfs_list_add_request(req, &data->pages); | 913 | nfs_list_add_request(req, &data->pages); |
931 | ClearPageError(req->wb_page); | 914 | ClearPageError(req->wb_page); |
932 | *pages++ = req->wb_page; | 915 | *pages++ = req->wb_page; |
933 | count += req->wb_bytes; | ||
934 | } | 916 | } |
935 | req = nfs_list_entry(data->pages.next); | 917 | req = nfs_list_entry(data->pages.next); |
936 | 918 | ||
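nfs_flush_one() no longer walks the list to recount bytes: the caller now passes npages and count, so the write data can be sized with nfs_writedata_alloc(npages) up front. The sketch below models that bookkeeping on the caller's side; the struct is a simplified stand-in, not the real nfs_pageio_descriptor.

        #include <stdio.h>
        #include <stddef.h>

        struct fake_desc {
                unsigned int npages;    /* pages coalesced so far */
                size_t count;           /* bytes coalesced so far */
        };

        /* Accumulate a request's page and byte count as it is coalesced. */
        static void add_request(struct fake_desc *d, size_t wb_bytes)
        {
                d->npages++;
                d->count += wb_bytes;
        }

        int main(void)
        {
                struct fake_desc d = { 0, 0 };

                add_request(&d, 4096);
                add_request(&d, 4096);
                add_request(&d, 1808);
                printf("flush_one(npages=%u, count=%zu)\n", d.npages, d.count);
                return 0;
        }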
@@ -943,47 +925,22 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | |||
943 | while (!list_empty(head)) { | 925 | while (!list_empty(head)) { |
944 | struct nfs_page *req = nfs_list_entry(head->next); | 926 | struct nfs_page *req = nfs_list_entry(head->next); |
945 | nfs_list_remove_request(req); | 927 | nfs_list_remove_request(req); |
946 | nfs_end_page_writeback(req->wb_page); | ||
947 | nfs_redirty_request(req); | 928 | nfs_redirty_request(req); |
929 | nfs_end_page_writeback(req->wb_page); | ||
948 | nfs_clear_page_writeback(req); | 930 | nfs_clear_page_writeback(req); |
949 | } | 931 | } |
950 | return -ENOMEM; | 932 | return -ENOMEM; |
951 | } | 933 | } |
952 | 934 | ||
953 | static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) | 935 | static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, |
936 | struct inode *inode, int ioflags) | ||
954 | { | 937 | { |
955 | LIST_HEAD(one_request); | ||
956 | int (*flush_one)(struct inode *, struct list_head *, int); | ||
957 | struct nfs_page *req; | ||
958 | int wpages = NFS_SERVER(inode)->wpages; | ||
959 | int wsize = NFS_SERVER(inode)->wsize; | 938 | int wsize = NFS_SERVER(inode)->wsize; |
960 | int error; | ||
961 | 939 | ||
962 | flush_one = nfs_flush_one; | ||
963 | if (wsize < PAGE_CACHE_SIZE) | 940 | if (wsize < PAGE_CACHE_SIZE) |
964 | flush_one = nfs_flush_multi; | 941 | nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags); |
965 | /* For single writes, FLUSH_STABLE is more efficient */ | 942 | else |
966 | if (npages <= wpages && npages == NFS_I(inode)->npages | 943 | nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags); |
967 | && nfs_list_entry(head->next)->wb_bytes <= wsize) | ||
968 | how |= FLUSH_STABLE; | ||
969 | |||
970 | do { | ||
971 | nfs_coalesce_requests(head, &one_request, wpages); | ||
972 | req = nfs_list_entry(one_request.next); | ||
973 | error = flush_one(inode, &one_request, how); | ||
974 | if (error < 0) | ||
975 | goto out_err; | ||
976 | } while (!list_empty(head)); | ||
977 | return 0; | ||
978 | out_err: | ||
979 | while (!list_empty(head)) { | ||
980 | req = nfs_list_entry(head->next); | ||
981 | nfs_list_remove_request(req); | ||
982 | nfs_end_page_writeback(req->wb_page); | ||
983 | nfs_redirty_request(req); | ||
984 | nfs_clear_page_writeback(req); | ||
985 | } | ||
986 | return error; | ||
987 | } | 944 | } |
988 | 945 | ||
989 | /* | 946 | /* |
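nfs_pageio_init_write() reduces the old nfs_flush_list() heuristics to a single choice: if the server's wsize cannot cover a whole page, each page has to be split across several WRITEs; otherwise whole pages are coalesced into one RPC. A minimal userspace model of that selection follows; the function pointers merely stand in for nfs_flush_multi/nfs_flush_one.

        #include <stdio.h>

        #define PAGE_CACHE_SIZE 4096u

        typedef int (*flush_fn_t)(const char *what);

        static int flush_multi(const char *what) { printf("multi: %s\n", what); return 0; }
        static int flush_one(const char *what)   { printf("one:   %s\n", what); return 0; }

        /* Same test as the hunk: small wsize forces per-page splitting. */
        static flush_fn_t pick_flush_strategy(unsigned int wsize)
        {
                return (wsize < PAGE_CACHE_SIZE) ? flush_multi : flush_one;
        }

        int main(void)
        {
                pick_flush_strategy(1024)("page split across several WRITEs");
                pick_flush_strategy(32768)("whole pages coalesced into one WRITE");
                return 0;
        }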
@@ -1008,22 +965,28 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) | |||
1008 | nfs_set_pageerror(page); | 965 | nfs_set_pageerror(page); |
1009 | req->wb_context->error = task->tk_status; | 966 | req->wb_context->error = task->tk_status; |
1010 | dprintk(", error = %d\n", task->tk_status); | 967 | dprintk(", error = %d\n", task->tk_status); |
1011 | } else { | 968 | goto out; |
1012 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | ||
1013 | if (data->verf.committed < NFS_FILE_SYNC) { | ||
1014 | if (!NFS_NEED_COMMIT(req)) { | ||
1015 | nfs_defer_commit(req); | ||
1016 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | ||
1017 | dprintk(" defer commit\n"); | ||
1018 | } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { | ||
1019 | nfs_defer_reschedule(req); | ||
1020 | dprintk(" server reboot detected\n"); | ||
1021 | } | ||
1022 | } else | ||
1023 | #endif | ||
1024 | dprintk(" OK\n"); | ||
1025 | } | 969 | } |
1026 | 970 | ||
971 | if (nfs_write_need_commit(data)) { | ||
972 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | ||
973 | |||
974 | spin_lock(req_lock); | ||
975 | if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) { | ||
976 | /* Do nothing we need to resend the writes */ | ||
977 | } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) { | ||
978 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | ||
979 | dprintk(" defer commit\n"); | ||
980 | } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) { | ||
981 | set_bit(PG_NEED_RESCHED, &req->wb_flags); | ||
982 | clear_bit(PG_NEED_COMMIT, &req->wb_flags); | ||
983 | dprintk(" server reboot detected\n"); | ||
984 | } | ||
985 | spin_unlock(req_lock); | ||
986 | } else | ||
987 | dprintk(" OK\n"); | ||
988 | |||
989 | out: | ||
1027 | if (atomic_dec_and_test(&req->wb_complete)) | 990 | if (atomic_dec_and_test(&req->wb_complete)) |
1028 | nfs_writepage_release(req); | 991 | nfs_writepage_release(req); |
1029 | } | 992 | } |
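In nfs_writeback_done_partial() the commit decision now happens under the inode's req_lock and is keyed off the write verifier: the first unstable reply records the verifier and flags the request for COMMIT, while a later reply carrying a different verifier indicates a server reboot and flips the request to PG_NEED_RESCHED. The standalone model below (flag and field names are simplified stand-ins) walks through those cases:

        #include <stdio.h>
        #include <string.h>

        enum { NEED_COMMIT = 1, NEED_RESCHED = 2 };

        struct fake_req {
                unsigned char verf[8];  /* stands in for req->wb_verf */
                unsigned int flags;
        };

        static void handle_unstable_reply(struct fake_req *req, const unsigned char *verf)
        {
                if (req->flags & NEED_RESCHED) {
                        /* Already marked for resend: nothing more to record. */
                } else if (!(req->flags & NEED_COMMIT)) {
                        memcpy(req->verf, verf, sizeof(req->verf));
                        req->flags |= NEED_COMMIT;
                        printf("defer commit\n");
                } else if (memcmp(req->verf, verf, sizeof(req->verf))) {
                        req->flags = (req->flags & ~NEED_COMMIT) | NEED_RESCHED;
                        printf("server reboot detected, resend\n");
                }
        }

        int main(void)
        {
                struct fake_req req = { { 0 }, 0 };
                unsigned char boot1[8] = "aaaaaaa";
                unsigned char boot2[8] = "bbbbbbb";

                handle_unstable_reply(&req, boot1);     /* first reply: defer commit */
                handle_unstable_reply(&req, boot1);     /* same verifier: nothing new */
                handle_unstable_reply(&req, boot2);     /* verifier changed: resend   */
                return 0;
        }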
@@ -1064,25 +1027,21 @@ static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) | |||
1064 | if (task->tk_status < 0) { | 1027 | if (task->tk_status < 0) { |
1065 | nfs_set_pageerror(page); | 1028 | nfs_set_pageerror(page); |
1066 | req->wb_context->error = task->tk_status; | 1029 | req->wb_context->error = task->tk_status; |
1067 | nfs_end_page_writeback(page); | ||
1068 | nfs_inode_remove_request(req); | ||
1069 | dprintk(", error = %d\n", task->tk_status); | 1030 | dprintk(", error = %d\n", task->tk_status); |
1070 | goto next; | 1031 | goto remove_request; |
1071 | } | 1032 | } |
1072 | nfs_end_page_writeback(page); | ||
1073 | 1033 | ||
1074 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | 1034 | if (nfs_write_need_commit(data)) { |
1075 | if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) { | 1035 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); |
1076 | nfs_inode_remove_request(req); | 1036 | nfs_mark_request_commit(req); |
1077 | dprintk(" OK\n"); | 1037 | nfs_end_page_writeback(page); |
1038 | dprintk(" marked for commit\n"); | ||
1078 | goto next; | 1039 | goto next; |
1079 | } | 1040 | } |
1080 | memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); | 1041 | dprintk(" OK\n"); |
1081 | nfs_mark_request_commit(req); | 1042 | remove_request: |
1082 | dprintk(" marked for commit\n"); | 1043 | nfs_end_page_writeback(page); |
1083 | #else | ||
1084 | nfs_inode_remove_request(req); | 1044 | nfs_inode_remove_request(req); |
1085 | #endif | ||
1086 | next: | 1045 | next: |
1087 | nfs_clear_page_writeback(req); | 1046 | nfs_clear_page_writeback(req); |
1088 | } | 1047 | } |
@@ -1270,6 +1229,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata) | |||
1270 | while (!list_empty(&data->pages)) { | 1229 | while (!list_empty(&data->pages)) { |
1271 | req = nfs_list_entry(data->pages.next); | 1230 | req = nfs_list_entry(data->pages.next); |
1272 | nfs_list_remove_request(req); | 1231 | nfs_list_remove_request(req); |
1232 | clear_bit(PG_NEED_COMMIT, &(req)->wb_flags); | ||
1273 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); | 1233 | dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); |
1274 | 1234 | ||
1275 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", | 1235 | dprintk("NFS: commit (%s/%Ld %d@%Ld)", |
@@ -1304,31 +1264,7 @@ static const struct rpc_call_ops nfs_commit_ops = { | |||
1304 | .rpc_call_done = nfs_commit_done, | 1264 | .rpc_call_done = nfs_commit_done, |
1305 | .rpc_release = nfs_commit_release, | 1265 | .rpc_release = nfs_commit_release, |
1306 | }; | 1266 | }; |
1307 | #else | ||
1308 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | ||
1309 | { | ||
1310 | return 0; | ||
1311 | } | ||
1312 | #endif | ||
1313 | |||
1314 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) | ||
1315 | { | ||
1316 | struct nfs_inode *nfsi = NFS_I(mapping->host); | ||
1317 | LIST_HEAD(head); | ||
1318 | long res; | ||
1319 | 1267 | ||
1320 | spin_lock(&nfsi->req_lock); | ||
1321 | res = nfs_scan_dirty(mapping, wbc, &head); | ||
1322 | spin_unlock(&nfsi->req_lock); | ||
1323 | if (res) { | ||
1324 | int error = nfs_flush_list(mapping->host, &head, res, how); | ||
1325 | if (error < 0) | ||
1326 | return error; | ||
1327 | } | ||
1328 | return res; | ||
1329 | } | ||
1330 | |||
1331 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | ||
1332 | int nfs_commit_inode(struct inode *inode, int how) | 1268 | int nfs_commit_inode(struct inode *inode, int how) |
1333 | { | 1269 | { |
1334 | struct nfs_inode *nfsi = NFS_I(inode); | 1270 | struct nfs_inode *nfsi = NFS_I(inode); |
@@ -1345,13 +1281,18 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
1345 | } | 1281 | } |
1346 | return res; | 1282 | return res; |
1347 | } | 1283 | } |
1284 | #else | ||
1285 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | ||
1286 | { | ||
1287 | return 0; | ||
1288 | } | ||
1348 | #endif | 1289 | #endif |
1349 | 1290 | ||
1350 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) | 1291 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) |
1351 | { | 1292 | { |
1352 | struct inode *inode = mapping->host; | 1293 | struct inode *inode = mapping->host; |
1353 | struct nfs_inode *nfsi = NFS_I(inode); | 1294 | struct nfs_inode *nfsi = NFS_I(inode); |
1354 | unsigned long idx_start, idx_end; | 1295 | pgoff_t idx_start, idx_end; |
1355 | unsigned int npages = 0; | 1296 | unsigned int npages = 0; |
1356 | LIST_HEAD(head); | 1297 | LIST_HEAD(head); |
1357 | int nocommit = how & FLUSH_NOCOMMIT; | 1298 | int nocommit = how & FLUSH_NOCOMMIT; |
@@ -1364,41 +1305,24 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr | |||
1364 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 1305 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; |
1365 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; | 1306 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; |
1366 | if (idx_end > idx_start) { | 1307 | if (idx_end > idx_start) { |
1367 | unsigned long l_npages = 1 + idx_end - idx_start; | 1308 | pgoff_t l_npages = 1 + idx_end - idx_start; |
1368 | npages = l_npages; | 1309 | npages = l_npages; |
1369 | if (sizeof(npages) != sizeof(l_npages) && | 1310 | if (sizeof(npages) != sizeof(l_npages) && |
1370 | (unsigned long)npages != l_npages) | 1311 | (pgoff_t)npages != l_npages) |
1371 | npages = 0; | 1312 | npages = 0; |
1372 | } | 1313 | } |
1373 | } | 1314 | } |
1374 | how &= ~FLUSH_NOCOMMIT; | 1315 | how &= ~FLUSH_NOCOMMIT; |
1375 | spin_lock(&nfsi->req_lock); | 1316 | spin_lock(&nfsi->req_lock); |
1376 | do { | 1317 | do { |
1377 | wbc->pages_skipped = 0; | ||
1378 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | 1318 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); |
1379 | if (ret != 0) | 1319 | if (ret != 0) |
1380 | continue; | 1320 | continue; |
1381 | pages = nfs_scan_dirty(mapping, wbc, &head); | ||
1382 | if (pages != 0) { | ||
1383 | spin_unlock(&nfsi->req_lock); | ||
1384 | if (how & FLUSH_INVALIDATE) { | ||
1385 | nfs_cancel_dirty_list(&head); | ||
1386 | ret = pages; | ||
1387 | } else | ||
1388 | ret = nfs_flush_list(inode, &head, pages, how); | ||
1389 | spin_lock(&nfsi->req_lock); | ||
1390 | continue; | ||
1391 | } | ||
1392 | if (wbc->pages_skipped != 0) | ||
1393 | continue; | ||
1394 | if (nocommit) | 1321 | if (nocommit) |
1395 | break; | 1322 | break; |
1396 | pages = nfs_scan_commit(inode, &head, idx_start, npages); | 1323 | pages = nfs_scan_commit(inode, &head, idx_start, npages); |
1397 | if (pages == 0) { | 1324 | if (pages == 0) |
1398 | if (wbc->pages_skipped != 0) | ||
1399 | continue; | ||
1400 | break; | 1325 | break; |
1401 | } | ||
1402 | if (how & FLUSH_INVALIDATE) { | 1326 | if (how & FLUSH_INVALIDATE) { |
1403 | spin_unlock(&nfsi->req_lock); | 1327 | spin_unlock(&nfsi->req_lock); |
1404 | nfs_cancel_commit_list(&head); | 1328 | nfs_cancel_commit_list(&head); |
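The page-index arithmetic in nfs_sync_mapping_wait() now uses pgoff_t, and the hunk keeps the existing guard that falls back to npages = 0 ("scan everything") when the count does not fit in an unsigned int. A small arithmetic sketch of the byte-range to page-index conversion, assuming 4 KiB pages for the example:

        #include <stdio.h>

        typedef unsigned long pgoff_t;
        #define PAGE_CACHE_SHIFT 12

        int main(void)
        {
                long long range_start = 5000, range_end = 20000;        /* bytes */
                pgoff_t idx_start = range_start >> PAGE_CACHE_SHIFT;    /* page 1 */
                pgoff_t idx_end = range_end >> PAGE_CACHE_SHIFT;        /* page 4 */
                unsigned int npages = 0;

                if (idx_end > idx_start) {
                        pgoff_t l_npages = 1 + idx_end - idx_start;

                        npages = l_npages;
                        /* If unsigned int cannot hold the count, 0 means "all pages". */
                        if (sizeof(npages) != sizeof(l_npages) &&
                            (pgoff_t)npages != l_npages)
                                npages = 0;
                }
                printf("pages %lu..%lu (%u pages)\n",
                       (unsigned long)idx_start, (unsigned long)idx_end, npages);
                return 0;
        }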
@@ -1430,7 +1354,7 @@ int nfs_wb_all(struct inode *inode) | |||
1430 | }; | 1354 | }; |
1431 | int ret; | 1355 | int ret; |
1432 | 1356 | ||
1433 | ret = generic_writepages(mapping, &wbc); | 1357 | ret = nfs_writepages(mapping, &wbc); |
1434 | if (ret < 0) | 1358 | if (ret < 0) |
1435 | goto out; | 1359 | goto out; |
1436 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); | 1360 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); |
@@ -1453,11 +1377,9 @@ int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, lo | |||
1453 | }; | 1377 | }; |
1454 | int ret; | 1378 | int ret; |
1455 | 1379 | ||
1456 | if (!(how & FLUSH_NOWRITEPAGE)) { | 1380 | ret = nfs_writepages(mapping, &wbc); |
1457 | ret = generic_writepages(mapping, &wbc); | 1381 | if (ret < 0) |
1458 | if (ret < 0) | 1382 | goto out; |
1459 | goto out; | ||
1460 | } | ||
1461 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); | 1383 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); |
1462 | if (ret >= 0) | 1384 | if (ret >= 0) |
1463 | return 0; | 1385 | return 0; |
@@ -1480,7 +1402,7 @@ int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) | |||
1480 | int ret; | 1402 | int ret; |
1481 | 1403 | ||
1482 | BUG_ON(!PageLocked(page)); | 1404 | BUG_ON(!PageLocked(page)); |
1483 | if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { | 1405 | if (clear_page_dirty_for_io(page)) { |
1484 | ret = nfs_writepage_locked(page, &wbc); | 1406 | ret = nfs_writepage_locked(page, &wbc); |
1485 | if (ret < 0) | 1407 | if (ret < 0) |
1486 | goto out; | 1408 | goto out; |
@@ -1505,15 +1427,32 @@ int nfs_wb_page(struct inode *inode, struct page* page) | |||
1505 | 1427 | ||
1506 | int nfs_set_page_dirty(struct page *page) | 1428 | int nfs_set_page_dirty(struct page *page) |
1507 | { | 1429 | { |
1430 | struct address_space *mapping = page->mapping; | ||
1431 | struct inode *inode; | ||
1432 | spinlock_t *req_lock; | ||
1508 | struct nfs_page *req; | 1433 | struct nfs_page *req; |
1434 | int ret; | ||
1509 | 1435 | ||
1510 | req = nfs_page_find_request(page); | 1436 | if (!mapping) |
1437 | goto out_raced; | ||
1438 | inode = mapping->host; | ||
1439 | if (!inode) | ||
1440 | goto out_raced; | ||
1441 | req_lock = &NFS_I(inode)->req_lock; | ||
1442 | spin_lock(req_lock); | ||
1443 | req = nfs_page_find_request_locked(page); | ||
1511 | if (req != NULL) { | 1444 | if (req != NULL) { |
1512 | /* Mark any existing write requests for flushing */ | 1445 | /* Mark any existing write requests for flushing */ |
1513 | set_bit(PG_NEED_FLUSH, &req->wb_flags); | 1446 | ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags); |
1447 | spin_unlock(req_lock); | ||
1514 | nfs_release_request(req); | 1448 | nfs_release_request(req); |
1449 | return ret; | ||
1515 | } | 1450 | } |
1516 | return __set_page_dirty_nobuffers(page); | 1451 | ret = __set_page_dirty_nobuffers(page); |
1452 | spin_unlock(req_lock); | ||
1453 | return ret; | ||
1454 | out_raced: | ||
1455 | return !TestSetPageDirty(page); | ||
1517 | } | 1456 | } |
1518 | 1457 | ||
1519 | 1458 | ||
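The reworked nfs_set_page_dirty() looks the request up under the req_lock and reports whether anything actually changed: 1 only on a clean-to-dirty transition (of the page itself, or of an existing request's PG_NEED_FLUSH bit), with a TestSetPageDirty fallback when the mapping has already been torn down by truncate. The userspace model below captures that return-value convention; the state fields are stand-ins and no locking is modelled.

        #include <stdio.h>

        struct fake_page {
                int has_mapping;        /* 0 models the truncate race   */
                int has_request;        /* a pending nfs_page exists    */
                int need_flush;         /* models PG_NEED_FLUSH         */
                int dirty;              /* models PageDirty             */
        };

        /* Return 1 only when the page (or its request) went clean -> dirty. */
        static int set_page_dirty(struct fake_page *p)
        {
                int was;

                if (!p->has_mapping) {          /* raced with truncate/teardown */
                        was = p->dirty;
                        p->dirty = 1;
                        return !was;
                }
                if (p->has_request) {           /* flag the pending write request */
                        was = p->need_flush;
                        p->need_flush = 1;
                        return !was;
                }
                was = p->dirty;                 /* ordinary dirtying path */
                p->dirty = 1;
                return !was;
        }

        int main(void)
        {
                struct fake_page p = { 1, 1, 0, 0 };
                int first = set_page_dirty(&p);
                int second = set_page_dirty(&p);

                printf("%d %d\n", first, second);       /* 1 0 */
                return 0;
        }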