author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-04 22:55:11 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-04 22:55:11 -0400
commit     4d4700707c0d4be0efc968989fb1cd01c60c0a35 (patch)
tree       478453a4ae9453bd8d26ffc3df6eedcc30799a43 /fs
parent     7e20ef030dde0e52dd5a57220ee82fa9facbea4e (diff)
parent     84dde76c4a2d99ed2d7de6ec82c53b56620900a3 (diff)
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (28 commits)
NFS: Fix a compile glitch on 64-bit systems
NFS: Clean up nfs_create_request comments
spkm3: initialize hash
spkm3: remove bad kfree, unnecessary export
spkm3: fix spkm3's use of hmac
NFS4: invalidate cached acl on setacl
NFS: Fix directory caching problem - with test case and patch.
NFS: Set meaningful value for fattr->time_start in readdirplus results.
NFS: Added support to turn off the NFSv3 READDIRPLUS RPC.
SUNRPC: RPC client should retry with different versions of rpcbind
SUNRPC: remove old portmapper
NFS: switch NFSROOT to use new rpcbind client
SUNRPC: switch the RPC server to use the new rpcbind registration API
SUNRPC: switch socket-based RPC transports to use rpcbind
SUNRPC: introduce rpcbind: replacement for in-kernel portmapper
SUNRPC: Eliminate side effects from rpc_malloc
SUNRPC: RPC buffer size estimates are too large
NLM: Shrink the maximum request size of NLM4 requests
NFS: Use pgoff_t in structures and functions that pass page cache offsets
NFS: Clean up nfs_sync_mapping_wait()
...
Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig              |  12
-rw-r--r--  fs/lockd/mon.c          |  10
-rw-r--r--  fs/lockd/xdr.c          |  20
-rw-r--r--  fs/lockd/xdr4.c         |  24
-rw-r--r--  fs/nfs/client.c         |   3
-rw-r--r--  fs/nfs/dir.c            |  20
-rw-r--r--  fs/nfs/direct.c         |   5
-rw-r--r--  fs/nfs/internal.h       |  12
-rw-r--r--  fs/nfs/mount_clnt.c     |   7
-rw-r--r--  fs/nfs/nfs2xdr.c        |   7
-rw-r--r--  fs/nfs/nfs3xdr.c        |  13
-rw-r--r--  fs/nfs/nfs4proc.c       |   3
-rw-r--r--  fs/nfs/nfs4xdr.c        |   7
-rw-r--r--  fs/nfs/nfsroot.c        |   2
-rw-r--r--  fs/nfs/pagelist.c       | 242
-rw-r--r--  fs/nfs/read.c           |  92
-rw-r--r--  fs/nfs/super.c          |  10
-rw-r--r--  fs/nfs/write.c          | 258
-rw-r--r--  fs/nfsd/nfs4callback.c  |   7
19 files changed, 349 insertions(+), 405 deletions(-)
diff --git a/fs/Kconfig b/fs/Kconfig
index e33c08924572..8ea7b04c661f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1734,6 +1734,18 @@ config SUNRPC | |||
1734 | config SUNRPC_GSS | 1734 | config SUNRPC_GSS |
1735 | tristate | 1735 | tristate |
1736 | 1736 | ||
1737 | config SUNRPC_BIND34 | ||
1738 | bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)" | ||
1739 | depends on SUNRPC && EXPERIMENTAL | ||
1740 | help | ||
1741 | Provides kernel support for querying rpcbind servers via versions 3 | ||
1742 | and 4 of the rpcbind protocol. The kernel automatically falls back | ||
1743 | to version 2 if a remote rpcbind service does not support versions | ||
1744 | 3 or 4. | ||
1745 | |||
1746 | If unsure, say N to get traditional behavior (version 2 rpcbind | ||
1747 | requests only). | ||
1748 | |||
1737 | config RPCSEC_GSS_KRB5 | 1749 | config RPCSEC_GSS_KRB5 |
1738 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" | 1750 | tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)" |
1739 | depends on SUNRPC && EXPERIMENTAL | 1751 | depends on SUNRPC && EXPERIMENTAL |
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index eb243edf8932..2102e2d0134d 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -225,16 +225,13 @@ xdr_decode_stat(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp) | |||
225 | #define SM_monres_sz 2 | 225 | #define SM_monres_sz 2 |
226 | #define SM_unmonres_sz 1 | 226 | #define SM_unmonres_sz 1 |
227 | 227 | ||
228 | #ifndef MAX | ||
229 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
230 | #endif | ||
231 | |||
232 | static struct rpc_procinfo nsm_procedures[] = { | 228 | static struct rpc_procinfo nsm_procedures[] = { |
233 | [SM_MON] = { | 229 | [SM_MON] = { |
234 | .p_proc = SM_MON, | 230 | .p_proc = SM_MON, |
235 | .p_encode = (kxdrproc_t) xdr_encode_mon, | 231 | .p_encode = (kxdrproc_t) xdr_encode_mon, |
236 | .p_decode = (kxdrproc_t) xdr_decode_stat_res, | 232 | .p_decode = (kxdrproc_t) xdr_decode_stat_res, |
237 | .p_bufsiz = MAX(SM_mon_sz, SM_monres_sz) << 2, | 233 | .p_arglen = SM_mon_sz, |
234 | .p_replen = SM_monres_sz, | ||
238 | .p_statidx = SM_MON, | 235 | .p_statidx = SM_MON, |
239 | .p_name = "MONITOR", | 236 | .p_name = "MONITOR", |
240 | }, | 237 | }, |
@@ -242,7 +239,8 @@ static struct rpc_procinfo nsm_procedures[] = { | |||
242 | .p_proc = SM_UNMON, | 239 | .p_proc = SM_UNMON, |
243 | .p_encode = (kxdrproc_t) xdr_encode_unmon, | 240 | .p_encode = (kxdrproc_t) xdr_encode_unmon, |
244 | .p_decode = (kxdrproc_t) xdr_decode_stat, | 241 | .p_decode = (kxdrproc_t) xdr_decode_stat, |
245 | .p_bufsiz = MAX(SM_mon_id_sz, SM_unmonres_sz) << 2, | 242 | .p_arglen = SM_mon_id_sz, |
243 | .p_replen = SM_unmonres_sz, | ||
246 | .p_statidx = SM_UNMON, | 244 | .p_statidx = SM_UNMON, |
247 | .p_name = "UNMONITOR", | 245 | .p_name = "UNMONITOR", |
248 | }, | 246 | }, |
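The change above is the pattern repeated throughout this series: the single `.p_bufsiz` byte estimate (the larger of the argument and reply sizes, shifted from XDR words into bytes) gives way to separate `.p_arglen` and `.p_replen` word counts, so the call and reply buffers can be sized independently. A minimal standalone sketch of the arithmetic, using hypothetical word counts rather than the real NSM sizes:

```c
/* Standalone illustration (not kernel code): procedure sizes are counts of
 * 32-bit XDR words, so bytes = words << 2.  The old .p_bufsiz collapsed the
 * argument and reply estimates into one max(); .p_arglen/.p_replen keep them
 * separate so the transport can budget call and reply space independently. */
#include <stdio.h>

int main(void)
{
	unsigned int arg_words = 11, reply_words = 2;	/* hypothetical sizes */

	unsigned int old_bufsiz  = (arg_words > reply_words ? arg_words : reply_words) << 2;
	unsigned int call_bytes  = arg_words << 2;	/* from .p_arglen */
	unsigned int reply_bytes = reply_words << 2;	/* from .p_replen */

	printf("old p_bufsiz estimate: %u bytes\n", old_bufsiz);
	printf("split estimate: %u call + %u reply bytes\n", call_bytes, reply_bytes);
	return 0;
}
```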
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index 34dae5d70738..9702956d206c 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -510,17 +510,20 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
510 | return 0; | 510 | return 0; |
511 | } | 511 | } |
512 | 512 | ||
513 | #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) | ||
514 | # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" | ||
515 | #endif | ||
516 | |||
513 | /* | 517 | /* |
514 | * Buffer requirements for NLM | 518 | * Buffer requirements for NLM |
515 | */ | 519 | */ |
516 | #define NLM_void_sz 0 | 520 | #define NLM_void_sz 0 |
517 | #define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) | 521 | #define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) |
518 | #define NLM_caller_sz 1+XDR_QUADLEN(sizeof(utsname()->nodename)) | 522 | #define NLM_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) |
519 | #define NLM_netobj_sz 1+XDR_QUADLEN(XDR_MAX_NETOBJ) | 523 | #define NLM_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) |
520 | /* #define NLM_owner_sz 1+XDR_QUADLEN(NLM_MAXOWNER) */ | ||
521 | #define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE) | 524 | #define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE) |
522 | #define NLM_lock_sz 3+NLM_caller_sz+NLM_netobj_sz+NLM_fhandle_sz | 525 | #define NLM_lock_sz 3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz |
523 | #define NLM_holder_sz 4+NLM_netobj_sz | 526 | #define NLM_holder_sz 4+NLM_owner_sz |
524 | 527 | ||
525 | #define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz | 528 | #define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz |
526 | #define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz | 529 | #define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz |
@@ -531,10 +534,6 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
531 | #define NLM_res_sz NLM_cookie_sz+1 | 534 | #define NLM_res_sz NLM_cookie_sz+1 |
532 | #define NLM_norep_sz 0 | 535 | #define NLM_norep_sz 0 |
533 | 536 | ||
534 | #ifndef MAX | ||
535 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
536 | #endif | ||
537 | |||
538 | /* | 537 | /* |
539 | * For NLM, a void procedure really returns nothing | 538 | * For NLM, a void procedure really returns nothing |
540 | */ | 539 | */ |
@@ -545,7 +544,8 @@ nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
545 | .p_proc = NLMPROC_##proc, \ | 544 | .p_proc = NLMPROC_##proc, \ |
546 | .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ | 545 | .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ |
547 | .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ | 546 | .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ |
548 | .p_bufsiz = MAX(NLM_##argtype##_sz, NLM_##restype##_sz) << 2, \ | 547 | .p_arglen = NLM_##argtype##_sz, \ |
548 | .p_replen = NLM_##restype##_sz, \ | ||
549 | .p_statidx = NLMPROC_##proc, \ | 549 | .p_statidx = NLMPROC_##proc, \ |
550 | .p_name = #proc, \ | 550 | .p_name = #proc, \ |
551 | } | 551 | } |
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index a78240551219..ce1efdbe1b3a 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -516,17 +516,24 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
516 | return 0; | 516 | return 0; |
517 | } | 517 | } |
518 | 518 | ||
519 | #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) | ||
520 | # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" | ||
521 | #endif | ||
522 | |||
523 | #if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN) | ||
524 | # error "NLM host name cannot be larger than NLM's maximum string length!" | ||
525 | #endif | ||
526 | |||
519 | /* | 527 | /* |
520 | * Buffer requirements for NLM | 528 | * Buffer requirements for NLM |
521 | */ | 529 | */ |
522 | #define NLM4_void_sz 0 | 530 | #define NLM4_void_sz 0 |
523 | #define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) | 531 | #define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) |
524 | #define NLM4_caller_sz 1+XDR_QUADLEN(NLM_MAXSTRLEN) | 532 | #define NLM4_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) |
525 | #define NLM4_netobj_sz 1+XDR_QUADLEN(XDR_MAX_NETOBJ) | 533 | #define NLM4_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) |
526 | /* #define NLM4_owner_sz 1+XDR_QUADLEN(NLM4_MAXOWNER) */ | ||
527 | #define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE) | 534 | #define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE) |
528 | #define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_netobj_sz+NLM4_fhandle_sz | 535 | #define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz |
529 | #define NLM4_holder_sz 6+NLM4_netobj_sz | 536 | #define NLM4_holder_sz 6+NLM4_owner_sz |
530 | 537 | ||
531 | #define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz | 538 | #define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz |
532 | #define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz | 539 | #define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz |
@@ -537,10 +544,6 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
537 | #define NLM4_res_sz NLM4_cookie_sz+1 | 544 | #define NLM4_res_sz NLM4_cookie_sz+1 |
538 | #define NLM4_norep_sz 0 | 545 | #define NLM4_norep_sz 0 |
539 | 546 | ||
540 | #ifndef MAX | ||
541 | # define MAX(a,b) (((a) > (b))? (a) : (b)) | ||
542 | #endif | ||
543 | |||
544 | /* | 547 | /* |
545 | * For NLM, a void procedure really returns nothing | 548 | * For NLM, a void procedure really returns nothing |
546 | */ | 549 | */ |
@@ -551,7 +554,8 @@ nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) | |||
551 | .p_proc = NLMPROC_##proc, \ | 554 | .p_proc = NLMPROC_##proc, \ |
552 | .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ | 555 | .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ |
553 | .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ | 556 | .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ |
554 | .p_bufsiz = MAX(NLM4_##argtype##_sz, NLM4_##restype##_sz) << 2, \ | 557 | .p_arglen = NLM4_##argtype##_sz, \ |
558 | .p_replen = NLM4_##restype##_sz, \ | ||
555 | .p_statidx = NLMPROC_##proc, \ | 559 | .p_statidx = NLMPROC_##proc, \ |
556 | .p_name = #proc, \ | 560 | .p_name = #proc, \ |
557 | } | 561 | } |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 2190e6c2792e..5bd03b97002e 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -618,7 +618,8 @@ static int nfs_init_server(struct nfs_server *server, const struct nfs_mount_dat | |||
618 | if (clp->cl_nfsversion == 3) { | 618 | if (clp->cl_nfsversion == 3) { |
619 | if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) | 619 | if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN) |
620 | server->namelen = NFS3_MAXNAMLEN; | 620 | server->namelen = NFS3_MAXNAMLEN; |
621 | server->caps |= NFS_CAP_READDIRPLUS; | 621 | if (!(data->flags & NFS_MOUNT_NORDIRPLUS)) |
622 | server->caps |= NFS_CAP_READDIRPLUS; | ||
622 | } else { | 623 | } else { |
623 | if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) | 624 | if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) |
624 | server->namelen = NFS2_MAXNAMLEN; | 625 | server->namelen = NFS2_MAXNAMLEN; |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index cd3469720cbf..e59fd31c9a22 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -154,6 +154,8 @@ typedef struct { | |||
154 | decode_dirent_t decode; | 154 | decode_dirent_t decode; |
155 | int plus; | 155 | int plus; |
156 | int error; | 156 | int error; |
157 | unsigned long timestamp; | ||
158 | int timestamp_valid; | ||
157 | } nfs_readdir_descriptor_t; | 159 | } nfs_readdir_descriptor_t; |
158 | 160 | ||
159 | /* Now we cache directories properly, by stuffing the dirent | 161 | /* Now we cache directories properly, by stuffing the dirent |
@@ -195,6 +197,8 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page) | |||
195 | } | 197 | } |
196 | goto error; | 198 | goto error; |
197 | } | 199 | } |
200 | desc->timestamp = timestamp; | ||
201 | desc->timestamp_valid = 1; | ||
198 | SetPageUptodate(page); | 202 | SetPageUptodate(page); |
199 | spin_lock(&inode->i_lock); | 203 | spin_lock(&inode->i_lock); |
200 | NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; | 204 | NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; |
@@ -225,6 +229,10 @@ int dir_decode(nfs_readdir_descriptor_t *desc) | |||
225 | if (IS_ERR(p)) | 229 | if (IS_ERR(p)) |
226 | return PTR_ERR(p); | 230 | return PTR_ERR(p); |
227 | desc->ptr = p; | 231 | desc->ptr = p; |
232 | if (desc->timestamp_valid) | ||
233 | desc->entry->fattr->time_start = desc->timestamp; | ||
234 | else | ||
235 | desc->entry->fattr->valid &= ~NFS_ATTR_FATTR; | ||
228 | return 0; | 236 | return 0; |
229 | } | 237 | } |
230 | 238 | ||
@@ -316,6 +324,10 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc) | |||
316 | __FUNCTION__, desc->page_index, | 324 | __FUNCTION__, desc->page_index, |
317 | (long long) *desc->dir_cookie); | 325 | (long long) *desc->dir_cookie); |
318 | 326 | ||
327 | /* If we find the page in the page_cache, we cannot be sure | ||
328 | * how fresh the data is, so we will ignore readdir_plus attributes. | ||
329 | */ | ||
330 | desc->timestamp_valid = 0; | ||
319 | page = read_cache_page(inode->i_mapping, desc->page_index, | 331 | page = read_cache_page(inode->i_mapping, desc->page_index, |
320 | (filler_t *)nfs_readdir_filler, desc); | 332 | (filler_t *)nfs_readdir_filler, desc); |
321 | if (IS_ERR(page)) { | 333 | if (IS_ERR(page)) { |
@@ -468,6 +480,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
468 | struct rpc_cred *cred = nfs_file_cred(file); | 480 | struct rpc_cred *cred = nfs_file_cred(file); |
469 | struct page *page = NULL; | 481 | struct page *page = NULL; |
470 | int status; | 482 | int status; |
483 | unsigned long timestamp; | ||
471 | 484 | ||
472 | dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n", | 485 | dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n", |
473 | (unsigned long long)*desc->dir_cookie); | 486 | (unsigned long long)*desc->dir_cookie); |
@@ -477,6 +490,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
477 | status = -ENOMEM; | 490 | status = -ENOMEM; |
478 | goto out; | 491 | goto out; |
479 | } | 492 | } |
493 | timestamp = jiffies; | ||
480 | desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie, | 494 | desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie, |
481 | page, | 495 | page, |
482 | NFS_SERVER(inode)->dtsize, | 496 | NFS_SERVER(inode)->dtsize, |
@@ -487,6 +501,8 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent, | |||
487 | desc->page = page; | 501 | desc->page = page; |
488 | desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ | 502 | desc->ptr = kmap(page); /* matching kunmap in nfs_do_filldir */ |
489 | if (desc->error >= 0) { | 503 | if (desc->error >= 0) { |
504 | desc->timestamp = timestamp; | ||
505 | desc->timestamp_valid = 1; | ||
490 | if ((status = dir_decode(desc)) == 0) | 506 | if ((status = dir_decode(desc)) == 0) |
491 | desc->entry->prev_cookie = *desc->dir_cookie; | 507 | desc->entry->prev_cookie = *desc->dir_cookie; |
492 | } else | 508 | } else |
@@ -849,6 +865,10 @@ static int nfs_dentry_delete(struct dentry *dentry) | |||
849 | static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) | 865 | static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) |
850 | { | 866 | { |
851 | nfs_inode_return_delegation(inode); | 867 | nfs_inode_return_delegation(inode); |
868 | if (S_ISDIR(inode->i_mode)) | ||
869 | /* drop any readdir cache as it could easily be old */ | ||
870 | NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; | ||
871 | |||
852 | if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { | 872 | if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { |
853 | lock_kernel(); | 873 | lock_kernel(); |
854 | drop_nlink(inode); | 874 | drop_nlink(inode); |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 2877744cb606..889de60f8a84 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -54,6 +54,7 @@ | |||
54 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
55 | #include <asm/atomic.h> | 55 | #include <asm/atomic.h> |
56 | 56 | ||
57 | #include "internal.h" | ||
57 | #include "iostat.h" | 58 | #include "iostat.h" |
58 | 59 | ||
59 | #define NFSDBG_FACILITY NFSDBG_VFS | 60 | #define NFSDBG_FACILITY NFSDBG_VFS |
@@ -271,7 +272,7 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo | |||
271 | bytes = min(rsize,count); | 272 | bytes = min(rsize,count); |
272 | 273 | ||
273 | result = -ENOMEM; | 274 | result = -ENOMEM; |
274 | data = nfs_readdata_alloc(pgbase + bytes); | 275 | data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes)); |
275 | if (unlikely(!data)) | 276 | if (unlikely(!data)) |
276 | break; | 277 | break; |
277 | 278 | ||
@@ -602,7 +603,7 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l | |||
602 | bytes = min(wsize,count); | 603 | bytes = min(wsize,count); |
603 | 604 | ||
604 | result = -ENOMEM; | 605 | result = -ENOMEM; |
605 | data = nfs_writedata_alloc(pgbase + bytes); | 606 | data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes)); |
606 | if (unlikely(!data)) | 607 | if (unlikely(!data)) |
607 | break; | 608 | break; |
608 | 609 | ||
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 6610f2b02077..ad2b40db1e65 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -231,3 +231,15 @@ unsigned int nfs_page_length(struct page *page) | |||
231 | } | 231 | } |
232 | return 0; | 232 | return 0; |
233 | } | 233 | } |
234 | |||
235 | /* | ||
236 | * Determine the number of pages in an array of length 'len' and | ||
237 | * with a base offset of 'base' | ||
238 | */ | ||
239 | static inline | ||
240 | unsigned int nfs_page_array_len(unsigned int base, size_t len) | ||
241 | { | ||
242 | return ((unsigned long)len + (unsigned long)base + | ||
243 | PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
244 | } | ||
245 | |||
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index f75fe72b4160..ca5a266a3140 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -133,13 +133,15 @@ xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p, struct mnt_fhstatus *res) | |||
133 | 133 | ||
134 | #define MNT_dirpath_sz (1 + 256) | 134 | #define MNT_dirpath_sz (1 + 256) |
135 | #define MNT_fhstatus_sz (1 + 8) | 135 | #define MNT_fhstatus_sz (1 + 8) |
136 | #define MNT_fhstatus3_sz (1 + 16) | ||
136 | 137 | ||
137 | static struct rpc_procinfo mnt_procedures[] = { | 138 | static struct rpc_procinfo mnt_procedures[] = { |
138 | [MNTPROC_MNT] = { | 139 | [MNTPROC_MNT] = { |
139 | .p_proc = MNTPROC_MNT, | 140 | .p_proc = MNTPROC_MNT, |
140 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, | 141 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, |
141 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus, | 142 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus, |
142 | .p_bufsiz = MNT_dirpath_sz << 2, | 143 | .p_arglen = MNT_dirpath_sz, |
144 | .p_replen = MNT_fhstatus_sz, | ||
143 | .p_statidx = MNTPROC_MNT, | 145 | .p_statidx = MNTPROC_MNT, |
144 | .p_name = "MOUNT", | 146 | .p_name = "MOUNT", |
145 | }, | 147 | }, |
@@ -150,7 +152,8 @@ static struct rpc_procinfo mnt3_procedures[] = { | |||
150 | .p_proc = MOUNTPROC3_MNT, | 152 | .p_proc = MOUNTPROC3_MNT, |
151 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, | 153 | .p_encode = (kxdrproc_t) xdr_encode_dirpath, |
152 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, | 154 | .p_decode = (kxdrproc_t) xdr_decode_fhstatus3, |
153 | .p_bufsiz = MNT_dirpath_sz << 2, | 155 | .p_arglen = MNT_dirpath_sz, |
156 | .p_replen = MNT_fhstatus3_sz, | ||
154 | .p_statidx = MOUNTPROC3_MNT, | 157 | .p_statidx = MOUNTPROC3_MNT, |
155 | .p_name = "MOUNT", | 158 | .p_name = "MOUNT", |
156 | }, | 159 | }, |
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 3be4e72a0227..abd9f8b48943 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -687,16 +687,13 @@ nfs_stat_to_errno(int stat) | |||
687 | return nfs_errtbl[i].errno; | 687 | return nfs_errtbl[i].errno; |
688 | } | 688 | } |
689 | 689 | ||
690 | #ifndef MAX | ||
691 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
692 | #endif | ||
693 | |||
694 | #define PROC(proc, argtype, restype, timer) \ | 690 | #define PROC(proc, argtype, restype, timer) \ |
695 | [NFSPROC_##proc] = { \ | 691 | [NFSPROC_##proc] = { \ |
696 | .p_proc = NFSPROC_##proc, \ | 692 | .p_proc = NFSPROC_##proc, \ |
697 | .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \ | 693 | .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \ |
698 | .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ | 694 | .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ |
699 | .p_bufsiz = MAX(NFS_##argtype##_sz,NFS_##restype##_sz) << 2, \ | 695 | .p_arglen = NFS_##argtype##_sz, \ |
696 | .p_replen = NFS_##restype##_sz, \ | ||
700 | .p_timer = timer, \ | 697 | .p_timer = timer, \ |
701 | .p_statidx = NFSPROC_##proc, \ | 698 | .p_statidx = NFSPROC_##proc, \ |
702 | .p_name = #proc, \ | 699 | .p_name = #proc, \ |
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 0ace092d126f..b51df8eb9f01 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1102,16 +1102,13 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) | |||
1102 | } | 1102 | } |
1103 | #endif /* CONFIG_NFS_V3_ACL */ | 1103 | #endif /* CONFIG_NFS_V3_ACL */ |
1104 | 1104 | ||
1105 | #ifndef MAX | ||
1106 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
1107 | #endif | ||
1108 | |||
1109 | #define PROC(proc, argtype, restype, timer) \ | 1105 | #define PROC(proc, argtype, restype, timer) \ |
1110 | [NFS3PROC_##proc] = { \ | 1106 | [NFS3PROC_##proc] = { \ |
1111 | .p_proc = NFS3PROC_##proc, \ | 1107 | .p_proc = NFS3PROC_##proc, \ |
1112 | .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \ | 1108 | .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \ |
1113 | .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ | 1109 | .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ |
1114 | .p_bufsiz = MAX(NFS3_##argtype##_sz,NFS3_##restype##_sz) << 2, \ | 1110 | .p_arglen = NFS3_##argtype##_sz, \ |
1111 | .p_replen = NFS3_##restype##_sz, \ | ||
1115 | .p_timer = timer, \ | 1112 | .p_timer = timer, \ |
1116 | .p_statidx = NFS3PROC_##proc, \ | 1113 | .p_statidx = NFS3PROC_##proc, \ |
1117 | .p_name = #proc, \ | 1114 | .p_name = #proc, \ |
@@ -1153,7 +1150,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { | |||
1153 | .p_proc = ACLPROC3_GETACL, | 1150 | .p_proc = ACLPROC3_GETACL, |
1154 | .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, | 1151 | .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, |
1155 | .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, | 1152 | .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, |
1156 | .p_bufsiz = MAX(ACL3_getaclargs_sz, ACL3_getaclres_sz) << 2, | 1153 | .p_arglen = ACL3_getaclargs_sz, |
1154 | .p_replen = ACL3_getaclres_sz, | ||
1157 | .p_timer = 1, | 1155 | .p_timer = 1, |
1158 | .p_name = "GETACL", | 1156 | .p_name = "GETACL", |
1159 | }, | 1157 | }, |
@@ -1161,7 +1159,8 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { | |||
1161 | .p_proc = ACLPROC3_SETACL, | 1159 | .p_proc = ACLPROC3_SETACL, |
1162 | .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, | 1160 | .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, |
1163 | .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, | 1161 | .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, |
1164 | .p_bufsiz = MAX(ACL3_setaclargs_sz, ACL3_setaclres_sz) << 2, | 1162 | .p_arglen = ACL3_setaclargs_sz, |
1163 | .p_replen = ACL3_setaclres_sz, | ||
1165 | .p_timer = 0, | 1164 | .p_timer = 0, |
1166 | .p_name = "SETACL", | 1165 | .p_name = "SETACL", |
1167 | }, | 1166 | }, |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f52cf5c33c6c..3b5ca1b15fe9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2647,8 +2647,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl | |||
2647 | nfs_inode_return_delegation(inode); | 2647 | nfs_inode_return_delegation(inode); |
2648 | buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); | 2648 | buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); |
2649 | ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); | 2649 | ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); |
2650 | if (ret == 0) | 2650 | nfs_zap_caches(inode); |
2651 | nfs4_write_cached_acl(inode, buf, buflen); | ||
2652 | return ret; | 2651 | return ret; |
2653 | } | 2652 | } |
2654 | 2653 | ||
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f02d522fd788..b8c28f2380a5 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4546,16 +4546,13 @@ nfs4_stat_to_errno(int stat) | |||
4546 | return stat; | 4546 | return stat; |
4547 | } | 4547 | } |
4548 | 4548 | ||
4549 | #ifndef MAX | ||
4550 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
4551 | #endif | ||
4552 | |||
4553 | #define PROC(proc, argtype, restype) \ | 4549 | #define PROC(proc, argtype, restype) \ |
4554 | [NFSPROC4_CLNT_##proc] = { \ | 4550 | [NFSPROC4_CLNT_##proc] = { \ |
4555 | .p_proc = NFSPROC4_COMPOUND, \ | 4551 | .p_proc = NFSPROC4_COMPOUND, \ |
4556 | .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ | 4552 | .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ |
4557 | .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ | 4553 | .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ |
4558 | .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \ | 4554 | .p_arglen = NFS4_##argtype##_sz, \ |
4555 | .p_replen = NFS4_##restype##_sz, \ | ||
4559 | .p_statidx = NFSPROC4_CLNT_##proc, \ | 4556 | .p_statidx = NFSPROC4_CLNT_##proc, \ |
4560 | .p_name = #proc, \ | 4557 | .p_name = #proc, \ |
4561 | } | 4558 | } |
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 75f819dc0255..49d1008ce1d7 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -428,7 +428,7 @@ static int __init root_nfs_getport(int program, int version, int proto) | |||
428 | printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", | 428 | printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", |
429 | program, version, NIPQUAD(servaddr)); | 429 | program, version, NIPQUAD(servaddr)); |
430 | set_sockaddr(&sin, servaddr, 0); | 430 | set_sockaddr(&sin, servaddr, 0); |
431 | return rpc_getport_external(&sin, program, version, proto); | 431 | return rpcb_getport_external(&sin, program, version, proto); |
432 | } | 432 | } |
433 | 433 | ||
434 | 434 | ||
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ca4b1d4ff42b..388950118f59 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -17,7 +17,8 @@ | |||
17 | #include <linux/nfs_page.h> | 17 | #include <linux/nfs_page.h> |
18 | #include <linux/nfs_fs.h> | 18 | #include <linux/nfs_fs.h> |
19 | #include <linux/nfs_mount.h> | 19 | #include <linux/nfs_mount.h> |
20 | #include <linux/writeback.h> | 20 | |
21 | #include "internal.h" | ||
21 | 22 | ||
22 | #define NFS_PARANOIA 1 | 23 | #define NFS_PARANOIA 1 |
23 | 24 | ||
@@ -50,9 +51,7 @@ nfs_page_free(struct nfs_page *p) | |||
50 | * @count: number of bytes to read/write | 51 | * @count: number of bytes to read/write |
51 | * | 52 | * |
52 | * The page must be locked by the caller. This makes sure we never | 53 | * The page must be locked by the caller. This makes sure we never |
53 | * create two different requests for the same page, and avoids | 54 | * create two different requests for the same page. |
54 | * a possible deadlock when we reach the hard limit on the number | ||
55 | * of dirty pages. | ||
56 | * User should ensure it is safe to sleep in this function. | 55 | * User should ensure it is safe to sleep in this function. |
57 | */ | 56 | */ |
58 | struct nfs_page * | 57 | struct nfs_page * |
@@ -63,16 +62,12 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode, | |||
63 | struct nfs_server *server = NFS_SERVER(inode); | 62 | struct nfs_server *server = NFS_SERVER(inode); |
64 | struct nfs_page *req; | 63 | struct nfs_page *req; |
65 | 64 | ||
66 | /* Deal with hard limits. */ | ||
67 | for (;;) { | 65 | for (;;) { |
68 | /* try to allocate the request struct */ | 66 | /* try to allocate the request struct */ |
69 | req = nfs_page_alloc(); | 67 | req = nfs_page_alloc(); |
70 | if (req != NULL) | 68 | if (req != NULL) |
71 | break; | 69 | break; |
72 | 70 | ||
73 | /* Try to free up at least one request in order to stay | ||
74 | * below the hard limit | ||
75 | */ | ||
76 | if (signalled() && (server->flags & NFS_MOUNT_INTR)) | 71 | if (signalled() && (server->flags & NFS_MOUNT_INTR)) |
77 | return ERR_PTR(-ERESTARTSYS); | 72 | return ERR_PTR(-ERESTARTSYS); |
78 | yield(); | 73 | yield(); |
@@ -223,124 +218,151 @@ out: | |||
223 | } | 218 | } |
224 | 219 | ||
225 | /** | 220 | /** |
226 | * nfs_coalesce_requests - Split coalesced requests out from a list. | 221 | * nfs_pageio_init - initialise a page io descriptor |
227 | * @head: source list | 222 | * @desc: pointer to descriptor |
228 | * @dst: destination list | 223 | * @inode: pointer to inode |
229 | * @nmax: maximum number of requests to coalesce | 224 | * @doio: pointer to io function |
230 | * | 225 | * @bsize: io block size |
231 | * Moves a maximum of 'nmax' elements from one list to another. | 226 | * @io_flags: extra parameters for the io function |
232 | * The elements are checked to ensure that they form a contiguous set | ||
233 | * of pages, and that the RPC credentials are the same. | ||
234 | */ | 227 | */ |
235 | int | 228 | void nfs_pageio_init(struct nfs_pageio_descriptor *desc, |
236 | nfs_coalesce_requests(struct list_head *head, struct list_head *dst, | 229 | struct inode *inode, |
237 | unsigned int nmax) | 230 | int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int), |
231 | size_t bsize, | ||
232 | int io_flags) | ||
238 | { | 233 | { |
239 | struct nfs_page *req = NULL; | 234 | INIT_LIST_HEAD(&desc->pg_list); |
240 | unsigned int npages = 0; | 235 | desc->pg_bytes_written = 0; |
241 | 236 | desc->pg_count = 0; | |
242 | while (!list_empty(head)) { | 237 | desc->pg_bsize = bsize; |
243 | struct nfs_page *prev = req; | 238 | desc->pg_base = 0; |
244 | 239 | desc->pg_inode = inode; | |
245 | req = nfs_list_entry(head->next); | 240 | desc->pg_doio = doio; |
246 | if (prev) { | 241 | desc->pg_ioflags = io_flags; |
247 | if (req->wb_context->cred != prev->wb_context->cred) | 242 | desc->pg_error = 0; |
248 | break; | ||
249 | if (req->wb_context->lockowner != prev->wb_context->lockowner) | ||
250 | break; | ||
251 | if (req->wb_context->state != prev->wb_context->state) | ||
252 | break; | ||
253 | if (req->wb_index != (prev->wb_index + 1)) | ||
254 | break; | ||
255 | |||
256 | if (req->wb_pgbase != 0) | ||
257 | break; | ||
258 | } | ||
259 | nfs_list_remove_request(req); | ||
260 | nfs_list_add_request(req, dst); | ||
261 | npages++; | ||
262 | if (req->wb_pgbase + req->wb_bytes != PAGE_CACHE_SIZE) | ||
263 | break; | ||
264 | if (npages >= nmax) | ||
265 | break; | ||
266 | } | ||
267 | return npages; | ||
268 | } | 243 | } |
269 | 244 | ||
270 | #define NFS_SCAN_MAXENTRIES 16 | ||
271 | /** | 245 | /** |
272 | * nfs_scan_dirty - Scan the radix tree for dirty requests | 246 | * nfs_can_coalesce_requests - test two requests for compatibility |
273 | * @mapping: pointer to address space | 247 | * @prev: pointer to nfs_page |
274 | * @wbc: writeback_control structure | 248 | * @req: pointer to nfs_page |
275 | * @dst: Destination list | ||
276 | * | 249 | * |
277 | * Moves elements from one of the inode request lists. | 250 | * The nfs_page structures 'prev' and 'req' are compared to ensure that the |
278 | * If the number of requests is set to 0, the entire address_space | 251 | * page data area they describe is contiguous, and that their RPC |
279 | * starting at index idx_start, is scanned. | 252 | * credentials, NFSv4 open state, and lockowners are the same. |
280 | * The requests are *not* checked to ensure that they form a contiguous set. | 253 | * |
281 | * You must be holding the inode's req_lock when calling this function | 254 | * Return 'true' if this is the case, else return 'false'. |
282 | */ | 255 | */ |
283 | long nfs_scan_dirty(struct address_space *mapping, | 256 | static int nfs_can_coalesce_requests(struct nfs_page *prev, |
284 | struct writeback_control *wbc, | 257 | struct nfs_page *req) |
285 | struct list_head *dst) | ||
286 | { | 258 | { |
287 | struct nfs_inode *nfsi = NFS_I(mapping->host); | 259 | if (req->wb_context->cred != prev->wb_context->cred) |
288 | struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; | ||
289 | struct nfs_page *req; | ||
290 | pgoff_t idx_start, idx_end; | ||
291 | long res = 0; | ||
292 | int found, i; | ||
293 | |||
294 | if (nfsi->ndirty == 0) | ||
295 | return 0; | 260 | return 0; |
296 | if (wbc->range_cyclic) { | 261 | if (req->wb_context->lockowner != prev->wb_context->lockowner) |
297 | idx_start = 0; | 262 | return 0; |
298 | idx_end = ULONG_MAX; | 263 | if (req->wb_context->state != prev->wb_context->state) |
299 | } else if (wbc->range_end == 0) { | 264 | return 0; |
300 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 265 | if (req->wb_index != (prev->wb_index + 1)) |
301 | idx_end = ULONG_MAX; | 266 | return 0; |
302 | } else { | 267 | if (req->wb_pgbase != 0) |
303 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 268 | return 0; |
304 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; | 269 | if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) |
305 | } | 270 | return 0; |
271 | return 1; | ||
272 | } | ||
306 | 273 | ||
307 | for (;;) { | 274 | /** |
308 | unsigned int toscan = NFS_SCAN_MAXENTRIES; | 275 | * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list. |
276 | * @desc: destination io descriptor | ||
277 | * @req: request | ||
278 | * | ||
279 | * Returns true if the request 'req' was successfully coalesced into the | ||
280 | * existing list of pages 'desc'. | ||
281 | */ | ||
282 | static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, | ||
283 | struct nfs_page *req) | ||
284 | { | ||
285 | size_t newlen = req->wb_bytes; | ||
309 | 286 | ||
310 | found = radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, | 287 | if (desc->pg_count != 0) { |
311 | (void **)&pgvec[0], idx_start, toscan, | 288 | struct nfs_page *prev; |
312 | NFS_PAGE_TAG_DIRTY); | ||
313 | 289 | ||
314 | /* Did we make progress? */ | 290 | /* |
315 | if (found <= 0) | 291 | * FIXME: ideally we should be able to coalesce all requests |
316 | break; | 292 | * that are not block boundary aligned, but currently this |
293 | * is problematic for the case of bsize < PAGE_CACHE_SIZE, | ||
294 | * since nfs_flush_multi and nfs_pagein_multi assume you | ||
295 | * can have only one struct nfs_page. | ||
296 | */ | ||
297 | if (desc->pg_bsize < PAGE_SIZE) | ||
298 | return 0; | ||
299 | newlen += desc->pg_count; | ||
300 | if (newlen > desc->pg_bsize) | ||
301 | return 0; | ||
302 | prev = nfs_list_entry(desc->pg_list.prev); | ||
303 | if (!nfs_can_coalesce_requests(prev, req)) | ||
304 | return 0; | ||
305 | } else | ||
306 | desc->pg_base = req->wb_pgbase; | ||
307 | nfs_list_remove_request(req); | ||
308 | nfs_list_add_request(req, &desc->pg_list); | ||
309 | desc->pg_count = newlen; | ||
310 | return 1; | ||
311 | } | ||
317 | 312 | ||
318 | for (i = 0; i < found; i++) { | 313 | /* |
319 | req = pgvec[i]; | 314 | * Helper for nfs_pageio_add_request and nfs_pageio_complete |
320 | if (!wbc->range_cyclic && req->wb_index > idx_end) | 315 | */ |
321 | goto out; | 316 | static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) |
317 | { | ||
318 | if (!list_empty(&desc->pg_list)) { | ||
319 | int error = desc->pg_doio(desc->pg_inode, | ||
320 | &desc->pg_list, | ||
321 | nfs_page_array_len(desc->pg_base, | ||
322 | desc->pg_count), | ||
323 | desc->pg_count, | ||
324 | desc->pg_ioflags); | ||
325 | if (error < 0) | ||
326 | desc->pg_error = error; | ||
327 | else | ||
328 | desc->pg_bytes_written += desc->pg_count; | ||
329 | } | ||
330 | if (list_empty(&desc->pg_list)) { | ||
331 | desc->pg_count = 0; | ||
332 | desc->pg_base = 0; | ||
333 | } | ||
334 | } | ||
322 | 335 | ||
323 | /* Try to lock request and mark it for writeback */ | 336 | /** |
324 | if (!nfs_set_page_writeback_locked(req)) | 337 | * nfs_pageio_add_request - Attempt to coalesce a request into a page list. |
325 | goto next; | 338 | * @desc: destination io descriptor |
326 | radix_tree_tag_clear(&nfsi->nfs_page_tree, | 339 | * @req: request |
327 | req->wb_index, NFS_PAGE_TAG_DIRTY); | 340 | * |
328 | nfsi->ndirty--; | 341 | * Returns true if the request 'req' was successfully coalesced into the |
329 | nfs_list_remove_request(req); | 342 | * existing list of pages 'desc'. |
330 | nfs_list_add_request(req, dst); | 343 | */ |
331 | res++; | 344 | int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, |
332 | if (res == LONG_MAX) | 345 | struct nfs_page *req) |
333 | goto out; | 346 | { |
334 | next: | 347 | while (!nfs_pageio_do_add_request(desc, req)) { |
335 | idx_start = req->wb_index + 1; | 348 | nfs_pageio_doio(desc); |
336 | } | 349 | if (desc->pg_error < 0) |
350 | return 0; | ||
337 | } | 351 | } |
338 | out: | 352 | return 1; |
339 | WARN_ON ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)); | ||
340 | return res; | ||
341 | } | 353 | } |
342 | 354 | ||
343 | /** | 355 | /** |
356 | * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor | ||
357 | * @desc: pointer to io descriptor | ||
358 | */ | ||
359 | void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) | ||
360 | { | ||
361 | nfs_pageio_doio(desc); | ||
362 | } | ||
363 | |||
364 | #define NFS_SCAN_MAXENTRIES 16 | ||
365 | /** | ||
344 | * nfs_scan_list - Scan a list for matching requests | 366 | * nfs_scan_list - Scan a list for matching requests |
345 | * @nfsi: NFS inode | 367 | * @nfsi: NFS inode |
346 | * @head: One of the NFS inode request lists | 368 | * @head: One of the NFS inode request lists |
@@ -355,12 +377,12 @@ out: | |||
355 | * You must be holding the inode's req_lock when calling this function | 377 | * You must be holding the inode's req_lock when calling this function |
356 | */ | 378 | */ |
357 | int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, | 379 | int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *head, |
358 | struct list_head *dst, unsigned long idx_start, | 380 | struct list_head *dst, pgoff_t idx_start, |
359 | unsigned int npages) | 381 | unsigned int npages) |
360 | { | 382 | { |
361 | struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; | 383 | struct nfs_page *pgvec[NFS_SCAN_MAXENTRIES]; |
362 | struct nfs_page *req; | 384 | struct nfs_page *req; |
363 | unsigned long idx_end; | 385 | pgoff_t idx_end; |
364 | int found, i; | 386 | int found, i; |
365 | int res; | 387 | int res; |
366 | 388 | ||
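The descriptor introduced here replaces `nfs_coalesce_requests()` and `nfs_scan_dirty()`: a caller initialises it with an I/O callback and block size, feeds it requests one at a time, and flushes whatever remains at the end. A hedged sketch of that calling pattern, condensed from the read path later in this merge (not a drop-in kernel function):

```c
/* Sketch of the calling pattern only; error handling and locking omitted. */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>

static void pagein_sketch(struct inode *inode, struct list_head *requests)
{
	struct nfs_pageio_descriptor pgio;
	size_t rsize = NFS_SERVER(inode)->rsize;

	/* A small rsize means each page must be split into several RPCs;
	 * otherwise whole-page requests are coalesced up to rsize bytes. */
	if (rsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
	else
		nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);

	while (!list_empty(requests)) {
		struct nfs_page *req = nfs_list_entry(requests->next);

		/* Moves req onto pgio.pg_list, first flushing the descriptor
		 * through the doio callback if req cannot be coalesced. */
		if (!nfs_pageio_add_request(&pgio, req))
			break;			/* pgio.pg_error is set */
	}
	/* Issue whatever is still queued in the descriptor. */
	nfs_pageio_complete(&pgio);
}
```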
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6ab4d5a9edf2..9a55807b2a70 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -27,7 +27,8 @@ | |||
27 | 27 | ||
28 | #define NFSDBG_FACILITY NFSDBG_PAGECACHE | 28 | #define NFSDBG_FACILITY NFSDBG_PAGECACHE |
29 | 29 | ||
30 | static int nfs_pagein_one(struct list_head *, struct inode *); | 30 | static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int); |
31 | static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int); | ||
31 | static const struct rpc_call_ops nfs_read_partial_ops; | 32 | static const struct rpc_call_ops nfs_read_partial_ops; |
32 | static const struct rpc_call_ops nfs_read_full_ops; | 33 | static const struct rpc_call_ops nfs_read_full_ops; |
33 | 34 | ||
@@ -36,9 +37,8 @@ static mempool_t *nfs_rdata_mempool; | |||
36 | 37 | ||
37 | #define MIN_POOL_READ (32) | 38 | #define MIN_POOL_READ (32) |
38 | 39 | ||
39 | struct nfs_read_data *nfs_readdata_alloc(size_t len) | 40 | struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) |
40 | { | 41 | { |
41 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
42 | struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); | 42 | struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, GFP_NOFS); |
43 | 43 | ||
44 | if (p) { | 44 | if (p) { |
@@ -133,7 +133,10 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, | |||
133 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); | 133 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); |
134 | 134 | ||
135 | nfs_list_add_request(new, &one_request); | 135 | nfs_list_add_request(new, &one_request); |
136 | nfs_pagein_one(&one_request, inode); | 136 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) |
137 | nfs_pagein_multi(inode, &one_request, 1, len, 0); | ||
138 | else | ||
139 | nfs_pagein_one(inode, &one_request, 1, len, 0); | ||
137 | return 0; | 140 | return 0; |
138 | } | 141 | } |
139 | 142 | ||
@@ -230,7 +233,7 @@ static void nfs_execute_read(struct nfs_read_data *data) | |||
230 | * won't see the new data until our attribute cache is updated. This is more | 233 | * won't see the new data until our attribute cache is updated. This is more |
231 | * or less conventional NFS client behavior. | 234 | * or less conventional NFS client behavior. |
232 | */ | 235 | */ |
233 | static int nfs_pagein_multi(struct list_head *head, struct inode *inode) | 236 | static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) |
234 | { | 237 | { |
235 | struct nfs_page *req = nfs_list_entry(head->next); | 238 | struct nfs_page *req = nfs_list_entry(head->next); |
236 | struct page *page = req->wb_page; | 239 | struct page *page = req->wb_page; |
@@ -242,11 +245,11 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode) | |||
242 | 245 | ||
243 | nfs_list_remove_request(req); | 246 | nfs_list_remove_request(req); |
244 | 247 | ||
245 | nbytes = req->wb_bytes; | 248 | nbytes = count; |
246 | do { | 249 | do { |
247 | size_t len = min(nbytes,rsize); | 250 | size_t len = min(nbytes,rsize); |
248 | 251 | ||
249 | data = nfs_readdata_alloc(len); | 252 | data = nfs_readdata_alloc(1); |
250 | if (!data) | 253 | if (!data) |
251 | goto out_bad; | 254 | goto out_bad; |
252 | INIT_LIST_HEAD(&data->pages); | 255 | INIT_LIST_HEAD(&data->pages); |
@@ -258,23 +261,19 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode) | |||
258 | 261 | ||
259 | ClearPageError(page); | 262 | ClearPageError(page); |
260 | offset = 0; | 263 | offset = 0; |
261 | nbytes = req->wb_bytes; | 264 | nbytes = count; |
262 | do { | 265 | do { |
263 | data = list_entry(list.next, struct nfs_read_data, pages); | 266 | data = list_entry(list.next, struct nfs_read_data, pages); |
264 | list_del_init(&data->pages); | 267 | list_del_init(&data->pages); |
265 | 268 | ||
266 | data->pagevec[0] = page; | 269 | data->pagevec[0] = page; |
267 | 270 | ||
268 | if (nbytes > rsize) { | 271 | if (nbytes < rsize) |
269 | nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, | 272 | rsize = nbytes; |
270 | rsize, offset); | 273 | nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, |
271 | offset += rsize; | 274 | rsize, offset); |
272 | nbytes -= rsize; | 275 | offset += rsize; |
273 | } else { | 276 | nbytes -= rsize; |
274 | nfs_read_rpcsetup(req, data, &nfs_read_partial_ops, | ||
275 | nbytes, offset); | ||
276 | nbytes = 0; | ||
277 | } | ||
278 | nfs_execute_read(data); | 277 | nfs_execute_read(data); |
279 | } while (nbytes != 0); | 278 | } while (nbytes != 0); |
280 | 279 | ||
@@ -291,30 +290,24 @@ out_bad: | |||
291 | return -ENOMEM; | 290 | return -ENOMEM; |
292 | } | 291 | } |
293 | 292 | ||
294 | static int nfs_pagein_one(struct list_head *head, struct inode *inode) | 293 | static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags) |
295 | { | 294 | { |
296 | struct nfs_page *req; | 295 | struct nfs_page *req; |
297 | struct page **pages; | 296 | struct page **pages; |
298 | struct nfs_read_data *data; | 297 | struct nfs_read_data *data; |
299 | unsigned int count; | ||
300 | 298 | ||
301 | if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE) | 299 | data = nfs_readdata_alloc(npages); |
302 | return nfs_pagein_multi(head, inode); | ||
303 | |||
304 | data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize); | ||
305 | if (!data) | 300 | if (!data) |
306 | goto out_bad; | 301 | goto out_bad; |
307 | 302 | ||
308 | INIT_LIST_HEAD(&data->pages); | 303 | INIT_LIST_HEAD(&data->pages); |
309 | pages = data->pagevec; | 304 | pages = data->pagevec; |
310 | count = 0; | ||
311 | while (!list_empty(head)) { | 305 | while (!list_empty(head)) { |
312 | req = nfs_list_entry(head->next); | 306 | req = nfs_list_entry(head->next); |
313 | nfs_list_remove_request(req); | 307 | nfs_list_remove_request(req); |
314 | nfs_list_add_request(req, &data->pages); | 308 | nfs_list_add_request(req, &data->pages); |
315 | ClearPageError(req->wb_page); | 309 | ClearPageError(req->wb_page); |
316 | *pages++ = req->wb_page; | 310 | *pages++ = req->wb_page; |
317 | count += req->wb_bytes; | ||
318 | } | 311 | } |
319 | req = nfs_list_entry(data->pages.next); | 312 | req = nfs_list_entry(data->pages.next); |
320 | 313 | ||
@@ -327,28 +320,6 @@ out_bad: | |||
327 | return -ENOMEM; | 320 | return -ENOMEM; |
328 | } | 321 | } |
329 | 322 | ||
330 | static int | ||
331 | nfs_pagein_list(struct list_head *head, int rpages) | ||
332 | { | ||
333 | LIST_HEAD(one_request); | ||
334 | struct nfs_page *req; | ||
335 | int error = 0; | ||
336 | unsigned int pages = 0; | ||
337 | |||
338 | while (!list_empty(head)) { | ||
339 | pages += nfs_coalesce_requests(head, &one_request, rpages); | ||
340 | req = nfs_list_entry(one_request.next); | ||
341 | error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode); | ||
342 | if (error < 0) | ||
343 | break; | ||
344 | } | ||
345 | if (error >= 0) | ||
346 | return pages; | ||
347 | |||
348 | nfs_async_read_error(head); | ||
349 | return error; | ||
350 | } | ||
351 | |||
352 | /* | 323 | /* |
353 | * This is the callback from RPC telling us whether a reply was | 324 | * This is the callback from RPC telling us whether a reply was |
354 | * received or some error occurred (timeout or socket shutdown). | 325 | * received or some error occurred (timeout or socket shutdown). |
@@ -538,7 +509,7 @@ out_error: | |||
538 | } | 509 | } |
539 | 510 | ||
540 | struct nfs_readdesc { | 511 | struct nfs_readdesc { |
541 | struct list_head *head; | 512 | struct nfs_pageio_descriptor *pgio; |
542 | struct nfs_open_context *ctx; | 513 | struct nfs_open_context *ctx; |
543 | }; | 514 | }; |
544 | 515 | ||
@@ -562,19 +533,21 @@ readpage_async_filler(void *data, struct page *page) | |||
562 | } | 533 | } |
563 | if (len < PAGE_CACHE_SIZE) | 534 | if (len < PAGE_CACHE_SIZE) |
564 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); | 535 | memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len); |
565 | nfs_list_add_request(new, desc->head); | 536 | nfs_pageio_add_request(desc->pgio, new); |
566 | return 0; | 537 | return 0; |
567 | } | 538 | } |
568 | 539 | ||
569 | int nfs_readpages(struct file *filp, struct address_space *mapping, | 540 | int nfs_readpages(struct file *filp, struct address_space *mapping, |
570 | struct list_head *pages, unsigned nr_pages) | 541 | struct list_head *pages, unsigned nr_pages) |
571 | { | 542 | { |
572 | LIST_HEAD(head); | 543 | struct nfs_pageio_descriptor pgio; |
573 | struct nfs_readdesc desc = { | 544 | struct nfs_readdesc desc = { |
574 | .head = &head, | 545 | .pgio = &pgio, |
575 | }; | 546 | }; |
576 | struct inode *inode = mapping->host; | 547 | struct inode *inode = mapping->host; |
577 | struct nfs_server *server = NFS_SERVER(inode); | 548 | struct nfs_server *server = NFS_SERVER(inode); |
549 | size_t rsize = server->rsize; | ||
550 | unsigned long npages; | ||
578 | int ret = -ESTALE; | 551 | int ret = -ESTALE; |
579 | 552 | ||
580 | dprintk("NFS: nfs_readpages (%s/%Ld %d)\n", | 553 | dprintk("NFS: nfs_readpages (%s/%Ld %d)\n", |
@@ -593,13 +566,16 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, | |||
593 | } else | 566 | } else |
594 | desc.ctx = get_nfs_open_context((struct nfs_open_context *) | 567 | desc.ctx = get_nfs_open_context((struct nfs_open_context *) |
595 | filp->private_data); | 568 | filp->private_data); |
569 | if (rsize < PAGE_CACHE_SIZE) | ||
570 | nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0); | ||
571 | else | ||
572 | nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0); | ||
573 | |||
596 | ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); | 574 | ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); |
597 | if (!list_empty(&head)) { | 575 | |
598 | int err = nfs_pagein_list(&head, server->rpages); | 576 | nfs_pageio_complete(&pgio); |
599 | if (!ret) | 577 | npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
600 | nfs_add_stats(inode, NFSIOS_READPAGES, err); | 578 | nfs_add_stats(inode, NFSIOS_READPAGES, npages); |
601 | ret = err; | ||
602 | } | ||
603 | put_nfs_open_context(desc.ctx); | 579 | put_nfs_open_context(desc.ctx); |
604 | out: | 580 | out: |
605 | return ret; | 581 | return ret; |
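When the server's rsize is smaller than a page, `nfs_pagein_multi()` above splits a single page's request into several partial read RPCs; after this patch the total comes from the passed-in `count` rather than `req->wb_bytes`. A standalone sketch of the slicing loop with hypothetical numbers (4 KiB page, 1 KiB rsize):

```c
/* Standalone illustration of how one page's worth of data is carved into
 * rsize-sized partial reads; the kernel loop also allocates and dispatches
 * one nfs_read_data per slice. */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t count = 4096, rsize = 1024, offset = 0, nbytes = count;

	do {
		size_t len = nbytes < rsize ? nbytes : rsize;

		printf("read RPC: offset %zu, length %zu\n", offset, len);
		offset += len;
		nbytes -= len;
	} while (nbytes != 0);
	return 0;
}
```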
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1eae44b9a1a..ca20d3cc2609 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -204,9 +204,9 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
204 | lock_kernel(); | 204 | lock_kernel(); |
205 | 205 | ||
206 | error = server->nfs_client->rpc_ops->statfs(server, fh, &res); | 206 | error = server->nfs_client->rpc_ops->statfs(server, fh, &res); |
207 | buf->f_type = NFS_SUPER_MAGIC; | ||
208 | if (error < 0) | 207 | if (error < 0) |
209 | goto out_err; | 208 | goto out_err; |
209 | buf->f_type = NFS_SUPER_MAGIC; | ||
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Current versions of glibc do not correctly handle the | 212 | * Current versions of glibc do not correctly handle the |
@@ -233,15 +233,14 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
233 | buf->f_ffree = res.afiles; | 233 | buf->f_ffree = res.afiles; |
234 | 234 | ||
235 | buf->f_namelen = server->namelen; | 235 | buf->f_namelen = server->namelen; |
236 | out: | 236 | |
237 | unlock_kernel(); | 237 | unlock_kernel(); |
238 | return 0; | 238 | return 0; |
239 | 239 | ||
240 | out_err: | 240 | out_err: |
241 | dprintk("%s: statfs error = %d\n", __FUNCTION__, -error); | 241 | dprintk("%s: statfs error = %d\n", __FUNCTION__, -error); |
242 | buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; | 242 | unlock_kernel(); |
243 | goto out; | 243 | return error; |
244 | |||
245 | } | 244 | } |
246 | 245 | ||
247 | /* | 246 | /* |
@@ -291,6 +290,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, | |||
291 | { NFS_MOUNT_NOAC, ",noac", "" }, | 290 | { NFS_MOUNT_NOAC, ",noac", "" }, |
292 | { NFS_MOUNT_NONLM, ",nolock", "" }, | 291 | { NFS_MOUNT_NONLM, ",nolock", "" }, |
293 | { NFS_MOUNT_NOACL, ",noacl", "" }, | 292 | { NFS_MOUNT_NOACL, ",noacl", "" }, |
293 | { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, | ||
294 | { 0, NULL, NULL } | 294 | { 0, NULL, NULL } |
295 | }; | 295 | }; |
296 | const struct proc_nfs_info *nfs_infop; | 296 | const struct proc_nfs_info *nfs_infop; |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 797558941745..5d44b8bd1070 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -38,7 +38,8 @@ | |||
38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, | 38 | static struct nfs_page * nfs_update_request(struct nfs_open_context*, |
39 | struct page *, | 39 | struct page *, |
40 | unsigned int, unsigned int); | 40 | unsigned int, unsigned int); |
41 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how); | 41 | static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc, |
42 | struct inode *inode, int ioflags); | ||
42 | static const struct rpc_call_ops nfs_write_partial_ops; | 43 | static const struct rpc_call_ops nfs_write_partial_ops; |
43 | static const struct rpc_call_ops nfs_write_full_ops; | 44 | static const struct rpc_call_ops nfs_write_full_ops; |
44 | static const struct rpc_call_ops nfs_commit_ops; | 45 | static const struct rpc_call_ops nfs_commit_ops; |
@@ -71,9 +72,8 @@ void nfs_commit_free(struct nfs_write_data *wdata) | |||
71 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); | 72 | call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free); |
72 | } | 73 | } |
73 | 74 | ||
74 | struct nfs_write_data *nfs_writedata_alloc(size_t len) | 75 | struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) |
75 | { | 76 | { |
76 | unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
77 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); | 77 | struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); |
78 | 78 | ||
79 | if (p) { | 79 | if (p) { |
@@ -139,7 +139,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c | |||
139 | { | 139 | { |
140 | struct inode *inode = page->mapping->host; | 140 | struct inode *inode = page->mapping->host; |
141 | loff_t end, i_size = i_size_read(inode); | 141 | loff_t end, i_size = i_size_read(inode); |
142 | unsigned long end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; | 142 | pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; |
143 | 143 | ||
144 | if (i_size > 0 && page->index < end_index) | 144 | if (i_size > 0 && page->index < end_index) |
145 | return; | 145 | return; |
@@ -201,7 +201,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, | |||
201 | static int wb_priority(struct writeback_control *wbc) | 201 | static int wb_priority(struct writeback_control *wbc) |
202 | { | 202 | { |
203 | if (wbc->for_reclaim) | 203 | if (wbc->for_reclaim) |
204 | return FLUSH_HIGHPRI; | 204 | return FLUSH_HIGHPRI | FLUSH_STABLE; |
205 | if (wbc->for_kupdate) | 205 | if (wbc->for_kupdate) |
206 | return FLUSH_LOWPRI; | 206 | return FLUSH_LOWPRI; |
207 | return 0; | 207 | return 0; |
@@ -251,7 +251,8 @@ static void nfs_end_page_writeback(struct page *page) | |||
251 | * was not tagged. | 251 | * was not tagged. |
252 | * May also return an error if the user signalled nfs_wait_on_request(). | 252 | * May also return an error if the user signalled nfs_wait_on_request(). |
253 | */ | 253 | */ |
254 | static int nfs_page_mark_flush(struct page *page) | 254 | static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, |
255 | struct page *page) | ||
255 | { | 256 | { |
256 | struct nfs_page *req; | 257 | struct nfs_page *req; |
257 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); | 258 | struct nfs_inode *nfsi = NFS_I(page->mapping->host); |
@@ -273,6 +274,8 @@ static int nfs_page_mark_flush(struct page *page) | |||
273 | * request as dirty (in which case we don't care). | 274 | * request as dirty (in which case we don't care). |
274 | */ | 275 | */ |
275 | spin_unlock(req_lock); | 276 | spin_unlock(req_lock); |
277 | /* Prevent deadlock! */ | ||
278 | nfs_pageio_complete(pgio); | ||
276 | ret = nfs_wait_on_request(req); | 279 | ret = nfs_wait_on_request(req); |
277 | nfs_release_request(req); | 280 | nfs_release_request(req); |
278 | if (ret != 0) | 281 | if (ret != 0) |
@@ -283,21 +286,18 @@ static int nfs_page_mark_flush(struct page *page) | |||
283 | /* This request is marked for commit */ | 286 | /* This request is marked for commit */ |
284 | spin_unlock(req_lock); | 287 | spin_unlock(req_lock); |
285 | nfs_unlock_request(req); | 288 | nfs_unlock_request(req); |
289 | nfs_pageio_complete(pgio); | ||
286 | return 1; | 290 | return 1; |
287 | } | 291 | } |
288 | if (nfs_set_page_writeback(page) == 0) { | 292 | if (nfs_set_page_writeback(page) != 0) { |
289 | nfs_list_remove_request(req); | ||
290 | /* add the request to the inode's dirty list. */ | ||
291 | radix_tree_tag_set(&nfsi->nfs_page_tree, | ||
292 | req->wb_index, NFS_PAGE_TAG_DIRTY); | ||
293 | nfs_list_add_request(req, &nfsi->dirty); | ||
294 | nfsi->ndirty++; | ||
295 | spin_unlock(req_lock); | ||
296 | __mark_inode_dirty(page->mapping->host, I_DIRTY_PAGES); | ||
297 | } else | ||
298 | spin_unlock(req_lock); | 293 | spin_unlock(req_lock); |
294 | BUG(); | ||
295 | } | ||
296 | radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, | ||
297 | NFS_PAGE_TAG_WRITEBACK); | ||
299 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); | 298 | ret = test_bit(PG_NEED_FLUSH, &req->wb_flags); |
300 | nfs_unlock_request(req); | 299 | spin_unlock(req_lock); |
300 | nfs_pageio_add_request(pgio, req); | ||
301 | return ret; | 301 | return ret; |
302 | } | 302 | } |
303 | 303 | ||
@@ -306,6 +306,7 @@ static int nfs_page_mark_flush(struct page *page) | |||
306 | */ | 306 | */ |
307 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) | 307 | static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) |
308 | { | 308 | { |
309 | struct nfs_pageio_descriptor mypgio, *pgio; | ||
309 | struct nfs_open_context *ctx; | 310 | struct nfs_open_context *ctx; |
310 | struct inode *inode = page->mapping->host; | 311 | struct inode *inode = page->mapping->host; |
311 | unsigned offset; | 312 | unsigned offset; |
@@ -314,7 +315,14 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc | |||
314 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); | 315 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); |
315 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); | 316 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); |
316 | 317 | ||
317 | err = nfs_page_mark_flush(page); | 318 | if (wbc->for_writepages) |
319 | pgio = wbc->fs_private; | ||
320 | else { | ||
321 | nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc)); | ||
322 | pgio = &mypgio; | ||
323 | } | ||
324 | |||
325 | err = nfs_page_async_flush(pgio, page); | ||
318 | if (err <= 0) | 326 | if (err <= 0) |
319 | goto out; | 327 | goto out; |
320 | err = 0; | 328 | err = 0; |
@@ -331,12 +339,12 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc | |||
331 | put_nfs_open_context(ctx); | 339 | put_nfs_open_context(ctx); |
332 | if (err != 0) | 340 | if (err != 0) |
333 | goto out; | 341 | goto out; |
334 | err = nfs_page_mark_flush(page); | 342 | err = nfs_page_async_flush(pgio, page); |
335 | if (err > 0) | 343 | if (err > 0) |
336 | err = 0; | 344 | err = 0; |
337 | out: | 345 | out: |
338 | if (!wbc->for_writepages) | 346 | if (!wbc->for_writepages) |
339 | nfs_flush_mapping(page->mapping, wbc, FLUSH_STABLE|wb_priority(wbc)); | 347 | nfs_pageio_complete(pgio); |
340 | return err; | 348 | return err; |
341 | } | 349 | } |
342 | 350 | ||
@@ -352,20 +360,20 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) | |||
352 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) | 360 | int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) |
353 | { | 361 | { |
354 | struct inode *inode = mapping->host; | 362 | struct inode *inode = mapping->host; |
363 | struct nfs_pageio_descriptor pgio; | ||
355 | int err; | 364 | int err; |
356 | 365 | ||
357 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); | 366 | nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); |
358 | 367 | ||
368 | nfs_pageio_init_write(&pgio, inode, wb_priority(wbc)); | ||
369 | wbc->fs_private = &pgio; | ||
359 | err = generic_writepages(mapping, wbc); | 370 | err = generic_writepages(mapping, wbc); |
371 | nfs_pageio_complete(&pgio); | ||
360 | if (err) | 372 | if (err) |
361 | return err; | 373 | return err; |
362 | err = nfs_flush_mapping(mapping, wbc, wb_priority(wbc)); | 374 | if (pgio.pg_error) |
363 | if (err < 0) | 375 | return pgio.pg_error; |
364 | goto out; | 376 | return 0; |
365 | nfs_add_stats(inode, NFSIOS_WRITEPAGES, err); | ||
366 | err = 0; | ||
367 | out: | ||
368 | return err; | ||
369 | } | 377 | } |
370 | 378 | ||
371 | /* | 379 | /* |
@@ -503,11 +511,11 @@ int nfs_reschedule_unstable_write(struct nfs_page *req) | |||
503 | * | 511 | * |
504 | * Interruptible by signals only if mounted with intr flag. | 512 | * Interruptible by signals only if mounted with intr flag. |
505 | */ | 513 | */ |
506 | static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_start, unsigned int npages) | 514 | static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages) |
507 | { | 515 | { |
508 | struct nfs_inode *nfsi = NFS_I(inode); | 516 | struct nfs_inode *nfsi = NFS_I(inode); |
509 | struct nfs_page *req; | 517 | struct nfs_page *req; |
510 | unsigned long idx_end, next; | 518 | pgoff_t idx_end, next; |
511 | unsigned int res = 0; | 519 | unsigned int res = 0; |
512 | int error; | 520 | int error; |
513 | 521 | ||
@@ -536,18 +544,6 @@ static int nfs_wait_on_requests_locked(struct inode *inode, unsigned long idx_st | |||
536 | return res; | 544 | return res; |
537 | } | 545 | } |
538 | 546 | ||
539 | static void nfs_cancel_dirty_list(struct list_head *head) | ||
540 | { | ||
541 | struct nfs_page *req; | ||
542 | while(!list_empty(head)) { | ||
543 | req = nfs_list_entry(head->next); | ||
544 | nfs_list_remove_request(req); | ||
545 | nfs_end_page_writeback(req->wb_page); | ||
546 | nfs_inode_remove_request(req); | ||
547 | nfs_clear_page_writeback(req); | ||
548 | } | ||
549 | } | ||
550 | |||
551 | static void nfs_cancel_commit_list(struct list_head *head) | 547 | static void nfs_cancel_commit_list(struct list_head *head) |
552 | { | 548 | { |
553 | struct nfs_page *req; | 549 | struct nfs_page *req; |
@@ -574,7 +570,7 @@ static void nfs_cancel_commit_list(struct list_head *head) | |||
574 | * The requests are *not* checked to ensure that they form a contiguous set. | 570 | * The requests are *not* checked to ensure that they form a contiguous set. |
575 | */ | 571 | */ |
576 | static int | 572 | static int |
577 | nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 573 | nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) |
578 | { | 574 | { |
579 | struct nfs_inode *nfsi = NFS_I(inode); | 575 | struct nfs_inode *nfsi = NFS_I(inode); |
580 | int res = 0; | 576 | int res = 0; |
@@ -588,40 +584,12 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st | |||
588 | return res; | 584 | return res; |
589 | } | 585 | } |
590 | #else | 586 | #else |
591 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_start, unsigned int npages) | 587 | static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages) |
592 | { | 588 | { |
593 | return 0; | 589 | return 0; |
594 | } | 590 | } |
595 | #endif | 591 | #endif |
596 | 592 | ||
597 | static int nfs_wait_on_write_congestion(struct address_space *mapping) | ||
598 | { | ||
599 | struct inode *inode = mapping->host; | ||
600 | struct backing_dev_info *bdi = mapping->backing_dev_info; | ||
601 | int ret = 0; | ||
602 | |||
603 | might_sleep(); | ||
604 | |||
605 | if (!bdi_write_congested(bdi)) | ||
606 | return 0; | ||
607 | |||
608 | nfs_inc_stats(inode, NFSIOS_CONGESTIONWAIT); | ||
609 | |||
610 | do { | ||
611 | struct rpc_clnt *clnt = NFS_CLIENT(inode); | ||
612 | sigset_t oldset; | ||
613 | |||
614 | rpc_clnt_sigmask(clnt, &oldset); | ||
615 | ret = congestion_wait_interruptible(WRITE, HZ/10); | ||
616 | rpc_clnt_sigunmask(clnt, &oldset); | ||
617 | if (ret == -ERESTARTSYS) | ||
618 | break; | ||
619 | ret = 0; | ||
620 | } while (bdi_write_congested(bdi)); | ||
621 | |||
622 | return ret; | ||
623 | } | ||
624 | |||
625 | /* | 593 | /* |
626 | * Try to update any existing write request, or create one if there is none. | 594 | * Try to update any existing write request, or create one if there is none. |
627 | * In order to match, the request's credentials must match those of | 595 | * In order to match, the request's credentials must match those of |
@@ -636,12 +604,10 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx, | |||
636 | struct inode *inode = mapping->host; | 604 | struct inode *inode = mapping->host; |
637 | struct nfs_inode *nfsi = NFS_I(inode); | 605 | struct nfs_inode *nfsi = NFS_I(inode); |
638 | struct nfs_page *req, *new = NULL; | 606 | struct nfs_page *req, *new = NULL; |
639 | unsigned long rqend, end; | 607 | pgoff_t rqend, end; |
640 | 608 | ||
641 | end = offset + bytes; | 609 | end = offset + bytes; |
642 | 610 | ||
643 | if (nfs_wait_on_write_congestion(mapping)) | ||
644 | return ERR_PTR(-ERESTARTSYS); | ||
645 | for (;;) { | 611 | for (;;) { |
646 | /* Loop over all inode entries and see if we find | 612 | /* Loop over all inode entries and see if we find |
647 | * A request for the page we wish to update | 613 | * A request for the page we wish to update |
@@ -865,7 +831,7 @@ static void nfs_execute_write(struct nfs_write_data *data) | |||
865 | * Generate multiple small requests to write out a single | 831 | * Generate multiple small requests to write out a single |
866 | * contiguous dirty area on one page. | 832 | * contiguous dirty area on one page. |
867 | */ | 833 | */ |
868 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | 834 | static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) |
869 | { | 835 | { |
870 | struct nfs_page *req = nfs_list_entry(head->next); | 836 | struct nfs_page *req = nfs_list_entry(head->next); |
871 | struct page *page = req->wb_page; | 837 | struct page *page = req->wb_page; |
@@ -877,11 +843,11 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | |||
877 | 843 | ||
878 | nfs_list_remove_request(req); | 844 | nfs_list_remove_request(req); |
879 | 845 | ||
880 | nbytes = req->wb_bytes; | 846 | nbytes = count; |
881 | do { | 847 | do { |
882 | size_t len = min(nbytes, wsize); | 848 | size_t len = min(nbytes, wsize); |
883 | 849 | ||
884 | data = nfs_writedata_alloc(len); | 850 | data = nfs_writedata_alloc(1); |
885 | if (!data) | 851 | if (!data) |
886 | goto out_bad; | 852 | goto out_bad; |
887 | list_add(&data->pages, &list); | 853 | list_add(&data->pages, &list); |
@@ -892,23 +858,19 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how) | |||
892 | 858 | ||
893 | ClearPageError(page); | 859 | ClearPageError(page); |
894 | offset = 0; | 860 | offset = 0; |
895 | nbytes = req->wb_bytes; | 861 | nbytes = count; |
896 | do { | 862 | do { |
897 | data = list_entry(list.next, struct nfs_write_data, pages); | 863 | data = list_entry(list.next, struct nfs_write_data, pages); |
898 | list_del_init(&data->pages); | 864 | list_del_init(&data->pages); |
899 | 865 | ||
900 | data->pagevec[0] = page; | 866 | data->pagevec[0] = page; |
901 | 867 | ||
902 | if (nbytes > wsize) { | 868 | if (nbytes < wsize) |
903 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | 869 | wsize = nbytes; |
904 | wsize, offset, how); | 870 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, |
905 | offset += wsize; | 871 | wsize, offset, how); |
906 | nbytes -= wsize; | 872 | offset += wsize; |
907 | } else { | 873 | nbytes -= wsize; |
908 | nfs_write_rpcsetup(req, data, &nfs_write_partial_ops, | ||
909 | nbytes, offset, how); | ||
910 | nbytes = 0; | ||
911 | } | ||
912 | nfs_execute_write(data); | 874 | nfs_execute_write(data); |
913 | } while (nbytes != 0); | 875 | } while (nbytes != 0); |
914 | 876 | ||
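[Editor's note on the nfs_flush_multi() hunk above: the second loop no longer branches on nbytes > wsize; it clamps wsize to the remaining byte count and always takes the same path. A standalone sketch of that chunking arithmetic follows; the 16 KiB wsize is just an example value.]

#include <stdio.h>
#include <stddef.h>

/* Split `count` bytes starting at offset 0 into wsize-sized sub-requests,
 * mirroring the rewritten loop: clamp the final chunk instead of
 * special-casing it. */
static void split_into_rpcs(size_t count, size_t wsize)
{
	size_t offset = 0;
	size_t nbytes = count;

	do {
		if (nbytes < wsize)
			wsize = nbytes;          /* final, short chunk */
		printf("rpc: offset=%zu len=%zu\n", offset, wsize);
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);
}

int main(void)
{
	split_into_rpcs(4096, 16384);            /* one rpc covers it all */
	split_into_rpcs(40000, 16384);           /* 16384 + 16384 + 7232  */
	return 0;
}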
@@ -934,26 +896,23 @@ out_bad: | |||
934 | * This is the case if nfs_updatepage detects a conflicting request | 896 | * This is the case if nfs_updatepage detects a conflicting request |
935 | * that has been written but not committed. | 897 | * that has been written but not committed. |
936 | */ | 898 | */ |
937 | static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | 899 | static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how) |
938 | { | 900 | { |
939 | struct nfs_page *req; | 901 | struct nfs_page *req; |
940 | struct page **pages; | 902 | struct page **pages; |
941 | struct nfs_write_data *data; | 903 | struct nfs_write_data *data; |
942 | unsigned int count; | ||
943 | 904 | ||
944 | data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize); | 905 | data = nfs_writedata_alloc(npages); |
945 | if (!data) | 906 | if (!data) |
946 | goto out_bad; | 907 | goto out_bad; |
947 | 908 | ||
948 | pages = data->pagevec; | 909 | pages = data->pagevec; |
949 | count = 0; | ||
950 | while (!list_empty(head)) { | 910 | while (!list_empty(head)) { |
951 | req = nfs_list_entry(head->next); | 911 | req = nfs_list_entry(head->next); |
952 | nfs_list_remove_request(req); | 912 | nfs_list_remove_request(req); |
953 | nfs_list_add_request(req, &data->pages); | 913 | nfs_list_add_request(req, &data->pages); |
954 | ClearPageError(req->wb_page); | 914 | ClearPageError(req->wb_page); |
955 | *pages++ = req->wb_page; | 915 | *pages++ = req->wb_page; |
956 | count += req->wb_bytes; | ||
957 | } | 916 | } |
958 | req = nfs_list_entry(data->pages.next); | 917 | req = nfs_list_entry(data->pages.next); |
959 | 918 | ||
@@ -973,40 +932,15 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how) | |||
973 | return -ENOMEM; | 932 | return -ENOMEM; |
974 | } | 933 | } |
975 | 934 | ||
976 | static int nfs_flush_list(struct inode *inode, struct list_head *head, int npages, int how) | 935 | static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, |
936 | struct inode *inode, int ioflags) | ||
977 | { | 937 | { |
978 | LIST_HEAD(one_request); | ||
979 | int (*flush_one)(struct inode *, struct list_head *, int); | ||
980 | struct nfs_page *req; | ||
981 | int wpages = NFS_SERVER(inode)->wpages; | ||
982 | int wsize = NFS_SERVER(inode)->wsize; | 938 | int wsize = NFS_SERVER(inode)->wsize; |
983 | int error; | ||
984 | 939 | ||
985 | flush_one = nfs_flush_one; | ||
986 | if (wsize < PAGE_CACHE_SIZE) | 940 | if (wsize < PAGE_CACHE_SIZE) |
987 | flush_one = nfs_flush_multi; | 941 | nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags); |
988 | /* For single writes, FLUSH_STABLE is more efficient */ | 942 | else |
989 | if (npages <= wpages && npages == NFS_I(inode)->npages | 943 | nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags); |
990 | && nfs_list_entry(head->next)->wb_bytes <= wsize) | ||
991 | how |= FLUSH_STABLE; | ||
992 | |||
993 | do { | ||
994 | nfs_coalesce_requests(head, &one_request, wpages); | ||
995 | req = nfs_list_entry(one_request.next); | ||
996 | error = flush_one(inode, &one_request, how); | ||
997 | if (error < 0) | ||
998 | goto out_err; | ||
999 | } while (!list_empty(head)); | ||
1000 | return 0; | ||
1001 | out_err: | ||
1002 | while (!list_empty(head)) { | ||
1003 | req = nfs_list_entry(head->next); | ||
1004 | nfs_list_remove_request(req); | ||
1005 | nfs_redirty_request(req); | ||
1006 | nfs_end_page_writeback(req->wb_page); | ||
1007 | nfs_clear_page_writeback(req); | ||
1008 | } | ||
1009 | return error; | ||
1010 | } | 944 | } |
1011 | 945 | ||
1012 | /* | 946 | /* |
@@ -1330,31 +1264,7 @@ static const struct rpc_call_ops nfs_commit_ops = { | |||
1330 | .rpc_call_done = nfs_commit_done, | 1264 | .rpc_call_done = nfs_commit_done, |
1331 | .rpc_release = nfs_commit_release, | 1265 | .rpc_release = nfs_commit_release, |
1332 | }; | 1266 | }; |
1333 | #else | ||
1334 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | ||
1335 | { | ||
1336 | return 0; | ||
1337 | } | ||
1338 | #endif | ||
1339 | |||
1340 | static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how) | ||
1341 | { | ||
1342 | struct nfs_inode *nfsi = NFS_I(mapping->host); | ||
1343 | LIST_HEAD(head); | ||
1344 | long res; | ||
1345 | |||
1346 | spin_lock(&nfsi->req_lock); | ||
1347 | res = nfs_scan_dirty(mapping, wbc, &head); | ||
1348 | spin_unlock(&nfsi->req_lock); | ||
1349 | if (res) { | ||
1350 | int error = nfs_flush_list(mapping->host, &head, res, how); | ||
1351 | if (error < 0) | ||
1352 | return error; | ||
1353 | } | ||
1354 | return res; | ||
1355 | } | ||
1356 | 1267 | ||
1357 | #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) | ||
1358 | int nfs_commit_inode(struct inode *inode, int how) | 1268 | int nfs_commit_inode(struct inode *inode, int how) |
1359 | { | 1269 | { |
1360 | struct nfs_inode *nfsi = NFS_I(inode); | 1270 | struct nfs_inode *nfsi = NFS_I(inode); |
@@ -1371,13 +1281,18 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
1371 | } | 1281 | } |
1372 | return res; | 1282 | return res; |
1373 | } | 1283 | } |
1284 | #else | ||
1285 | static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how) | ||
1286 | { | ||
1287 | return 0; | ||
1288 | } | ||
1374 | #endif | 1289 | #endif |
1375 | 1290 | ||
1376 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) | 1291 | long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how) |
1377 | { | 1292 | { |
1378 | struct inode *inode = mapping->host; | 1293 | struct inode *inode = mapping->host; |
1379 | struct nfs_inode *nfsi = NFS_I(inode); | 1294 | struct nfs_inode *nfsi = NFS_I(inode); |
1380 | unsigned long idx_start, idx_end; | 1295 | pgoff_t idx_start, idx_end; |
1381 | unsigned int npages = 0; | 1296 | unsigned int npages = 0; |
1382 | LIST_HEAD(head); | 1297 | LIST_HEAD(head); |
1383 | int nocommit = how & FLUSH_NOCOMMIT; | 1298 | int nocommit = how & FLUSH_NOCOMMIT; |
@@ -1390,41 +1305,24 @@ long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_contr | |||
1390 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; | 1305 | idx_start = wbc->range_start >> PAGE_CACHE_SHIFT; |
1391 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; | 1306 | idx_end = wbc->range_end >> PAGE_CACHE_SHIFT; |
1392 | if (idx_end > idx_start) { | 1307 | if (idx_end > idx_start) { |
1393 | unsigned long l_npages = 1 + idx_end - idx_start; | 1308 | pgoff_t l_npages = 1 + idx_end - idx_start; |
1394 | npages = l_npages; | 1309 | npages = l_npages; |
1395 | if (sizeof(npages) != sizeof(l_npages) && | 1310 | if (sizeof(npages) != sizeof(l_npages) && |
1396 | (unsigned long)npages != l_npages) | 1311 | (pgoff_t)npages != l_npages) |
1397 | npages = 0; | 1312 | npages = 0; |
1398 | } | 1313 | } |
1399 | } | 1314 | } |
1400 | how &= ~FLUSH_NOCOMMIT; | 1315 | how &= ~FLUSH_NOCOMMIT; |
1401 | spin_lock(&nfsi->req_lock); | 1316 | spin_lock(&nfsi->req_lock); |
1402 | do { | 1317 | do { |
1403 | wbc->pages_skipped = 0; | ||
1404 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); | 1318 | ret = nfs_wait_on_requests_locked(inode, idx_start, npages); |
1405 | if (ret != 0) | 1319 | if (ret != 0) |
1406 | continue; | 1320 | continue; |
1407 | pages = nfs_scan_dirty(mapping, wbc, &head); | ||
1408 | if (pages != 0) { | ||
1409 | spin_unlock(&nfsi->req_lock); | ||
1410 | if (how & FLUSH_INVALIDATE) { | ||
1411 | nfs_cancel_dirty_list(&head); | ||
1412 | ret = pages; | ||
1413 | } else | ||
1414 | ret = nfs_flush_list(inode, &head, pages, how); | ||
1415 | spin_lock(&nfsi->req_lock); | ||
1416 | continue; | ||
1417 | } | ||
1418 | if (wbc->pages_skipped != 0) | ||
1419 | continue; | ||
1420 | if (nocommit) | 1321 | if (nocommit) |
1421 | break; | 1322 | break; |
1422 | pages = nfs_scan_commit(inode, &head, idx_start, npages); | 1323 | pages = nfs_scan_commit(inode, &head, idx_start, npages); |
1423 | if (pages == 0) { | 1324 | if (pages == 0) |
1424 | if (wbc->pages_skipped != 0) | ||
1425 | continue; | ||
1426 | break; | 1325 | break; |
1427 | } | ||
1428 | if (how & FLUSH_INVALIDATE) { | 1326 | if (how & FLUSH_INVALIDATE) { |
1429 | spin_unlock(&nfsi->req_lock); | 1327 | spin_unlock(&nfsi->req_lock); |
1430 | nfs_cancel_commit_list(&head); | 1328 | nfs_cancel_commit_list(&head); |
@@ -1456,7 +1354,7 @@ int nfs_wb_all(struct inode *inode) | |||
1456 | }; | 1354 | }; |
1457 | int ret; | 1355 | int ret; |
1458 | 1356 | ||
1459 | ret = generic_writepages(mapping, &wbc); | 1357 | ret = nfs_writepages(mapping, &wbc); |
1460 | if (ret < 0) | 1358 | if (ret < 0) |
1461 | goto out; | 1359 | goto out; |
1462 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); | 1360 | ret = nfs_sync_mapping_wait(mapping, &wbc, 0); |
@@ -1479,11 +1377,9 @@ int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, lo | |||
1479 | }; | 1377 | }; |
1480 | int ret; | 1378 | int ret; |
1481 | 1379 | ||
1482 | if (!(how & FLUSH_NOWRITEPAGE)) { | 1380 | ret = nfs_writepages(mapping, &wbc); |
1483 | ret = generic_writepages(mapping, &wbc); | 1381 | if (ret < 0) |
1484 | if (ret < 0) | 1382 | goto out; |
1485 | goto out; | ||
1486 | } | ||
1487 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); | 1383 | ret = nfs_sync_mapping_wait(mapping, &wbc, how); |
1488 | if (ret >= 0) | 1384 | if (ret >= 0) |
1489 | return 0; | 1385 | return 0; |
@@ -1506,7 +1402,7 @@ int nfs_wb_page_priority(struct inode *inode, struct page *page, int how) | |||
1506 | int ret; | 1402 | int ret; |
1507 | 1403 | ||
1508 | BUG_ON(!PageLocked(page)); | 1404 | BUG_ON(!PageLocked(page)); |
1509 | if (!(how & FLUSH_NOWRITEPAGE) && clear_page_dirty_for_io(page)) { | 1405 | if (clear_page_dirty_for_io(page)) { |
1510 | ret = nfs_writepage_locked(page, &wbc); | 1406 | ret = nfs_writepage_locked(page, &wbc); |
1511 | if (ret < 0) | 1407 | if (ret < 0) |
1512 | goto out; | 1408 | goto out; |
@@ -1531,10 +1427,18 @@ int nfs_wb_page(struct inode *inode, struct page* page) | |||
1531 | 1427 | ||
1532 | int nfs_set_page_dirty(struct page *page) | 1428 | int nfs_set_page_dirty(struct page *page) |
1533 | { | 1429 | { |
1534 | spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock; | 1430 | struct address_space *mapping = page->mapping; |
1431 | struct inode *inode; | ||
1432 | spinlock_t *req_lock; | ||
1535 | struct nfs_page *req; | 1433 | struct nfs_page *req; |
1536 | int ret; | 1434 | int ret; |
1537 | 1435 | ||
1436 | if (!mapping) | ||
1437 | goto out_raced; | ||
1438 | inode = mapping->host; | ||
1439 | if (!inode) | ||
1440 | goto out_raced; | ||
1441 | req_lock = &NFS_I(inode)->req_lock; | ||
1538 | spin_lock(req_lock); | 1442 | spin_lock(req_lock); |
1539 | req = nfs_page_find_request_locked(page); | 1443 | req = nfs_page_find_request_locked(page); |
1540 | if (req != NULL) { | 1444 | if (req != NULL) { |
@@ -1547,6 +1451,8 @@ int nfs_set_page_dirty(struct page *page) | |||
1547 | ret = __set_page_dirty_nobuffers(page); | 1451 | ret = __set_page_dirty_nobuffers(page); |
1548 | spin_unlock(req_lock); | 1452 | spin_unlock(req_lock); |
1549 | return ret; | 1453 | return ret; |
1454 | out_raced: | ||
1455 | return !TestSetPageDirty(page); | ||
1550 | } | 1456 | } |
1551 | 1457 | ||
1552 | 1458 | ||
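[Editor's note on the nfs_set_page_dirty() hunk above: the new out_raced path handles a page whose mapping, or the mapping's host inode, has already been torn down by truncation; there is then no req_lock to take, so the code just sets the dirty bit. The user-space mock below only shows that fallback and the return-value convention; the types and names are stand-ins, and the normal request-lookup path is elided.]

#include <stdio.h>

struct inode { int i_ino; };
struct address_space { struct inode *host; };
struct page {
	struct address_space *mapping;  /* NULL once truncation detaches the page */
	int dirty;
};

/* Stand-in for TestSetPageDirty(): set the bit, report the old value. */
static int test_set_page_dirty(struct page *page)
{
	int was_dirty = page->dirty;

	page->dirty = 1;
	return was_dirty;
}

/* Returns 1 if the page was newly dirtied, 0 if it was already dirty. */
static int nfs_set_page_dirty_mock(struct page *page)
{
	if (page->mapping == NULL || page->mapping->host == NULL)
		return !test_set_page_dirty(page);     /* raced with truncation */

	/* Normal path (elided here): take req_lock, redirty any pending
	 * nfs_page request, then __set_page_dirty_nobuffers(). */
	return !test_set_page_dirty(page);
}

int main(void)
{
	struct page truncated = { NULL, 0 };

	printf("%d\n", nfs_set_page_dirty_mock(&truncated)); /* 1: newly dirtied */
	printf("%d\n", nfs_set_page_dirty_mock(&truncated)); /* 0: already dirty */
	return 0;
}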
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index fb14d68eacab..32ffea033c7a 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -315,16 +315,13 @@ out: | |||
315 | /* | 315 | /* |
316 | * RPC procedure tables | 316 | * RPC procedure tables |
317 | */ | 317 | */ |
318 | #ifndef MAX | ||
319 | # define MAX(a, b) (((a) > (b))? (a) : (b)) | ||
320 | #endif | ||
321 | |||
322 | #define PROC(proc, call, argtype, restype) \ | 318 | #define PROC(proc, call, argtype, restype) \ |
323 | [NFSPROC4_CLNT_##proc] = { \ | 319 | [NFSPROC4_CLNT_##proc] = { \ |
324 | .p_proc = NFSPROC4_CB_##call, \ | 320 | .p_proc = NFSPROC4_CB_##call, \ |
325 | .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ | 321 | .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ |
326 | .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ | 322 | .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ |
327 | .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \ | 323 | .p_arglen = NFS4_##argtype##_sz, \ |
324 | .p_replen = NFS4_##restype##_sz, \ | ||
328 | .p_statidx = NFSPROC4_CB_##call, \ | 325 | .p_statidx = NFSPROC4_CB_##call, \ |
329 | .p_name = #proc, \ | 326 | .p_name = #proc, \ |
330 | } | 327 | } |
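[Editor's note on the nfs4callback.c hunk above: the local MAX() macro goes away because the procedure table no longer stores one buffer size derived from the larger of the argument and reply XDR word counts (shifted left by 2 to get bytes); it now records p_arglen and p_replen separately so the RPC layer can size send and receive buffers independently. The standalone sketch below reproduces the table-generation idiom only; the _sz values are made-up placeholders, not the real NFSv4 callback sizes.]

#include <stdio.h>

struct rpc_procinfo {             /* trimmed stand-in for the kernel struct */
	unsigned int p_proc;
	unsigned int p_arglen;    /* XDR size estimate of the arguments (words) */
	unsigned int p_replen;    /* XDR size estimate of the reply (words)     */
	const char  *p_name;
};

/* Made-up XDR word counts, only so the macro below has something to expand. */
#define NFS4_enc_cb_null_sz     1
#define NFS4_dec_cb_null_sz     1
#define NFS4_enc_cb_recall_sz   8
#define NFS4_dec_cb_recall_sz   4

enum { CB_NULL = 0, CB_RECALL = 1, CB_NRPROCS };

/* Same idiom as the kernel's PROC() macro: one designated-initializer entry
 * per procedure, with argument and reply estimates kept as separate fields. */
#define PROC(proc, argtype, restype)			\
	[proc] = {					\
		.p_proc   = proc,			\
		.p_arglen = NFS4_##argtype##_sz,	\
		.p_replen = NFS4_##restype##_sz,	\
		.p_name   = #proc,			\
	}

static const struct rpc_procinfo cb_procedures[CB_NRPROCS] = {
	PROC(CB_NULL,   enc_cb_null,   dec_cb_null),
	PROC(CB_RECALL, enc_cb_recall, dec_cb_recall),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < CB_NRPROCS; i++)
		printf("%-10s arglen=%u replen=%u\n",
		       cb_procedures[i].p_name,
		       cb_procedures[i].p_arglen,
		       cb_procedures[i].p_replen);
	return 0;
}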