Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/Kconfig          |  10
-rw-r--r--  fs/nfs/callback_proc.c  |  19
-rw-r--r--  fs/nfs/client.c         |  21
-rw-r--r--  fs/nfs/delegation.c     |  16
-rw-r--r--  fs/nfs/delegation.h     |   4
-rw-r--r--  fs/nfs/dir.c            |  13
-rw-r--r--  fs/nfs/direct.c         |  29
-rw-r--r--  fs/nfs/file.c           |  64
-rw-r--r--  fs/nfs/inode.c          |  74
-rw-r--r--  fs/nfs/internal.h       |  10
-rw-r--r--  fs/nfs/nfs2xdr.c        |   7
-rw-r--r--  fs/nfs/nfs3xdr.c        |   8
-rw-r--r--  fs/nfs/nfs4_fs.h        |  57
-rw-r--r--  fs/nfs/nfs4proc.c       | 474
-rw-r--r--  fs/nfs/nfs4renewd.c     |   4
-rw-r--r--  fs/nfs/nfs4state.c      |  82
-rw-r--r--  fs/nfs/nfs4xdr.c        | 107
-rw-r--r--  fs/nfs/nfsroot.c        |   2
-rw-r--r--  fs/nfs/pagelist.c       |   8
-rw-r--r--  fs/nfs/read.c           |   3
-rw-r--r--  fs/nfs/super.c          |   4
-rw-r--r--  fs/nfs/unlink.c         |   2
-rw-r--r--  fs/nfs/write.c          |  39
23 files changed, 658 insertions(+), 399 deletions(-)
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index a43d07e7b924..cc1bb33b59b8 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -61,8 +61,8 @@ config NFS_V3_ACL
 	  If unsure, say N.
 
 config NFS_V4
-	bool "NFS client support for NFS version 4 (EXPERIMENTAL)"
-	depends on NFS_FS && EXPERIMENTAL
+	bool "NFS client support for NFS version 4"
+	depends on NFS_FS
 	select RPCSEC_GSS_KRB5
 	help
 	  This option enables support for version 4 of the NFS protocol
@@ -72,16 +72,16 @@ config NFS_V4
 	  space programs which can be found in the Linux nfs-utils package,
 	  available from http://linux-nfs.org/.
 
-	  If unsure, say N.
+	  If unsure, say Y.
 
 config NFS_V4_1
-	bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+	bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
 	depends on NFS_V4 && EXPERIMENTAL
 	help
 	  This option enables support for minor version 1 of the NFSv4 protocol
 	  (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
 
-	  Unless you're an NFS developer, say N.
+	  If unsure, say N.
 
 config ROOT_NFS
 	bool "Root file system on NFS"
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index a08770a7e857..930d10fecdaf 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -37,8 +37,8 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
 	if (inode == NULL)
 		goto out_putclient;
 	nfsi = NFS_I(inode);
-	down_read(&nfsi->rwsem);
-	delegation = nfsi->delegation;
+	rcu_read_lock();
+	delegation = rcu_dereference(nfsi->delegation);
 	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
 		goto out_iput;
 	res->size = i_size_read(inode);
@@ -53,7 +53,7 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
 			args->bitmap[1];
 	res->status = 0;
 out_iput:
-	up_read(&nfsi->rwsem);
+	rcu_read_unlock();
 	iput(inode);
 out_putclient:
 	nfs_put_client(clp);
@@ -62,16 +62,6 @@ out:
 	return res->status;
 }
 
-static int (*nfs_validate_delegation_stateid(struct nfs_client *clp))(struct nfs_delegation *, const nfs4_stateid *)
-{
-#if defined(CONFIG_NFS_V4_1)
-	if (clp->cl_minorversion > 0)
-		return nfs41_validate_delegation_stateid;
-#endif
-	return nfs4_validate_delegation_stateid;
-}
-
-
 __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
 {
 	struct nfs_client *clp;
@@ -92,8 +82,7 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
 	inode = nfs_delegation_find_inode(clp, &args->fh);
 	if (inode != NULL) {
 		/* Set up a helper thread to actually return the delegation */
-		switch (nfs_async_inode_return_delegation(inode, &args->stateid,
-					nfs_validate_delegation_stateid(clp))) {
+		switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
 		case 0:
 			res = 0;
 			break;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d25b5257b7a1..4e7df2adb212 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -150,6 +150,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
 		clp->cl_boot_time = CURRENT_TIME;
 		clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
 		clp->cl_minorversion = cl_init->minorversion;
+		clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
 #endif
 	cred = rpc_lookup_machine_cred();
 	if (!IS_ERR(cred))
@@ -178,7 +179,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
 		clp->cl_session = NULL;
 	}
 
-	clp->cl_call_sync = _nfs4_call_sync;
+	clp->cl_mvops = nfs_v4_minor_ops[0];
 #endif /* CONFIG_NFS_V4_1 */
 }
 
@@ -188,7 +189,7 @@ static void nfs4_clear_client_minor_version(struct nfs_client *clp)
 static void nfs4_destroy_callback(struct nfs_client *clp)
 {
 	if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
-		nfs_callback_down(clp->cl_minorversion);
+		nfs_callback_down(clp->cl_mvops->minor_version);
 }
 
 static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -1126,7 +1127,7 @@ static int nfs4_init_callback(struct nfs_client *clp)
 		return error;
 	}
 
-	error = nfs_callback_up(clp->cl_minorversion,
+	error = nfs_callback_up(clp->cl_mvops->minor_version,
 				clp->cl_rpcclient->cl_xprt);
 	if (error < 0) {
 		dprintk("%s: failed to start callback. Error = %d\n",
@@ -1143,10 +1144,8 @@ static int nfs4_init_callback(struct nfs_client *clp)
  */
 static int nfs4_init_client_minor_version(struct nfs_client *clp)
 {
-	clp->cl_call_sync = _nfs4_call_sync;
-
 #if defined(CONFIG_NFS_V4_1)
-	if (clp->cl_minorversion) {
+	if (clp->cl_mvops->minor_version) {
 		struct nfs4_session *session = NULL;
 		/*
 		 * Create the session and mark it expired.
@@ -1158,7 +1157,13 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
 			return -ENOMEM;
 
 		clp->cl_session = session;
-		clp->cl_call_sync = _nfs4_call_sync_session;
+		/*
+		 * The create session reply races with the server back
+		 * channel probe. Mark the client NFS_CS_SESSION_INITING
+		 * so that the client back channel can find the
+		 * nfs_client struct
+		 */
+		clp->cl_cons_state = NFS_CS_SESSION_INITING;
 	}
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -1454,7 +1459,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
 				data->authflavor,
 				parent_server->client->cl_xprt->prot,
 				parent_server->client->cl_timeout,
-				parent_client->cl_minorversion);
+				parent_client->cl_mvops->minor_version);
 	if (error < 0)
 		goto error;
 
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 301634543974..b9c3c43cea1d 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -268,14 +268,6 @@ out:
 	return status;
 }
 
-/* Sync all data to disk upon delegation return */
-static void nfs_msync_inode(struct inode *inode)
-{
-	filemap_fdatawrite(inode->i_mapping);
-	nfs_wb_all(inode);
-	filemap_fdatawait(inode->i_mapping);
-}
-
 /*
  * Basic procedure for returning a delegation to the server
  */
@@ -367,7 +359,7 @@ int nfs_inode_return_delegation(struct inode *inode)
 		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
 		spin_unlock(&clp->cl_lock);
 		if (delegation != NULL) {
-			nfs_msync_inode(inode);
+			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
 		}
 	}
@@ -471,9 +463,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 /*
  * Asynchronous delegation recall!
  */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
-		int (*validate_stateid)(struct nfs_delegation *delegation,
-		const nfs4_stateid *stateid))
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_delegation *delegation;
@@ -481,7 +471,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
 	rcu_read_lock();
 	delegation = rcu_dereference(NFS_I(inode)->delegation);
 
-	if (!validate_stateid(delegation, stateid)) {
+	if (!clp->cl_mvops->validate_stateid(delegation, stateid)) {
 		rcu_read_unlock();
 		return -ENOENT;
 	}
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 69e7b8140122..2026304bda19 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -34,9 +34,7 @@ enum {
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
 int nfs_inode_return_delegation(struct inode *inode);
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid,
-		int (*validate_stateid)(struct nfs_delegation *delegation,
-		const nfs4_stateid *stateid));
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
 void nfs_inode_return_delegation_noreclaim(struct inode *inode);
 
 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 782b431ef91c..29539ceeb745 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1652,16 +1652,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
-	/*
-	 * ... prune child dentries and writebacks if needed.
-	 */
-	if (atomic_read(&old_dentry->d_count) > 1) {
-		if (S_ISREG(old_inode->i_mode))
-			nfs_wb_all(old_inode);
-		shrink_dcache_parent(old_dentry);
-	}
 	nfs_inode_return_delegation(old_inode);
-
 	if (new_inode != NULL)
 		nfs_inode_return_delegation(new_inode);
 
@@ -1710,7 +1701,7 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
+int nfs_access_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi;
@@ -1953,7 +1944,7 @@ int nfs_permission(struct inode *inode, int mask)
 	if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
 		goto out;
 	/* Is this sys_access() ? */
-	if (mask & MAY_ACCESS)
+	if (mask & (MAY_ACCESS | MAY_CHDIR))
 		goto force_lookup;
 
 	switch (inode->i_mode & S_IFMT) {
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index ad4cd31d6050..064a80961677 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -69,6 +69,7 @@ struct nfs_direct_req {
 
 	/* I/O parameters */
 	struct nfs_open_context *ctx;	/* file open context info */
+	struct nfs_lock_context *l_ctx;	/* Lock context info */
 	struct kiocb *iocb;		/* controlling i/o request */
 	struct inode *inode;		/* target file of i/o */
 
@@ -160,6 +161,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
+	dreq->l_ctx = NULL;
 	spin_lock_init(&dreq->lock);
 	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
@@ -173,6 +175,8 @@ static void nfs_direct_req_free(struct kref *kref)
 {
 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 
+	if (dreq->l_ctx != NULL)
+		nfs_put_lock_context(dreq->l_ctx);
 	if (dreq->ctx != NULL)
 		put_nfs_open_context(dreq->ctx);
 	kmem_cache_free(nfs_direct_cachep, dreq);
@@ -336,6 +340,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
+		data->args.lock_context = dreq->l_ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
@@ -416,24 +421,28 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 			       unsigned long nr_segs, loff_t pos)
 {
-	ssize_t result = 0;
+	ssize_t result = -ENOMEM;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct nfs_direct_req *dreq;
 
 	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return -ENOMEM;
+	if (dreq == NULL)
+		goto out;
 
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+	if (dreq->l_ctx == NULL)
+		goto out_release;
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
 	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
 	if (!result)
 		result = nfs_direct_wait(dreq);
+out_release:
 	nfs_direct_req_release(dreq);
-
+out:
 	return result;
 }
 
@@ -574,6 +583,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->args.offset = 0;
 	data->args.count = 0;
 	data->args.context = dreq->ctx;
+	data->args.lock_context = dreq->l_ctx;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -761,6 +771,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
+		data->args.lock_context = dreq->l_ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
@@ -845,7 +856,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 			       unsigned long nr_segs, loff_t pos,
 			       size_t count)
 {
-	ssize_t result = 0;
+	ssize_t result = -ENOMEM;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct nfs_direct_req *dreq;
 	size_t wsize = NFS_SERVER(inode)->wsize;
@@ -853,7 +864,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
 	dreq = nfs_direct_req_alloc();
 	if (!dreq)
-		return -ENOMEM;
+		goto out;
 	nfs_alloc_commit_data(dreq);
 
 	if (dreq->commit_data == NULL || count < wsize)
@@ -861,14 +872,18 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+	if (dreq->l_ctx != NULL)
+		goto out_release;
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
 	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
 	if (!result)
 		result = nfs_direct_wait(dreq);
+out_release:
 	nfs_direct_req_release(dreq);
-
+out:
 	return result;
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 36a5e74f51b4..2d141a74ae82 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -27,6 +27,7 @@
 #include <linux/pagemap.h>
 #include <linux/aio.h>
 #include <linux/gfp.h>
+#include <linux/swap.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -202,37 +203,11 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
 }
 
 /*
- * Helper for nfs_file_flush() and nfs_file_fsync()
- *
- * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
- * disk, but it retrieves and clears ctx->error after synching, despite
- * the two being set at the same time in nfs_context_set_write_error().
- * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occured, and hence cause it to
- * fall back to doing a synchronous write.
- */
-static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode)
-{
-	int have_error, status;
-	int ret = 0;
-
-	have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
-	status = nfs_wb_all(inode);
-	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
-	if (have_error)
-		ret = xchg(&ctx->error, 0);
-	if (!ret)
-		ret = status;
-	return ret;
-}
-
-/*
  * Flush all dirty pages, and check for write errors.
  */
 static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct dentry *dentry = file->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
 
@@ -245,7 +220,7 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 		return 0;
 
 	/* Flush writes to the server and return any errors */
-	return nfs_do_fsync(ctx, inode);
+	return vfs_fsync(file, 0);
 }
 
 static ssize_t
@@ -320,6 +295,13 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
  * Flush any dirty pages for this process, and check for write errors.
  * The return status from this call provides a reliable indication of
  * whether any write errors occurred for this process.
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
+ * disk, but it retrieves and clears ctx->error after synching, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occured, and hence cause it to
+ * fall back to doing a synchronous write.
  */
 static int
 nfs_file_fsync(struct file *file, int datasync)
@@ -327,13 +309,23 @@ nfs_file_fsync(struct file *file, int datasync)
 	struct dentry *dentry = file->f_path.dentry;
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct inode *inode = dentry->d_inode;
+	int have_error, status;
+	int ret = 0;
+
 
 	dprintk("NFS: fsync file(%s/%s) datasync %d\n",
 			dentry->d_parent->d_name.name, dentry->d_name.name,
 			datasync);
 
 	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
-	return nfs_do_fsync(ctx, inode);
+	have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+	status = nfs_commit_inode(inode, FLUSH_SYNC);
+	have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+	if (have_error)
+		ret = xchg(&ctx->error, 0);
+	if (!ret)
+		ret = status;
+	return ret;
 }
 
 /*
@@ -493,11 +485,19 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
  */
 static int nfs_release_page(struct page *page, gfp_t gfp)
 {
+	struct address_space *mapping = page->mapping;
+
 	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
 
 	/* Only do I/O if gfp is a superset of GFP_KERNEL */
-	if ((gfp & GFP_KERNEL) == GFP_KERNEL)
-		nfs_wb_page(page->mapping->host, page);
+	if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+		int how = FLUSH_SYNC;
+
+		/* Don't let kswapd deadlock waiting for OOM RPC calls */
+		if (current_is_kswapd())
+			how = 0;
+		nfs_commit_inode(mapping->host, how);
+	}
 	/* If PagePrivate() is set, then the page is not freeable */
 	if (PagePrivate(page))
 		return 0;
@@ -639,7 +639,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 
 	/* Return error values for O_DSYNC and IS_SYNC() */
 	if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
-		int err = nfs_do_fsync(nfs_file_open_context(iocb->ki_filp), inode);
+		int err = vfs_fsync(iocb->ki_filp, 0);
 		if (err < 0)
 			result = err;
 	}
@@ -675,7 +675,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
 		written = ret;
 
 	if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
-		int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+		int err = vfs_fsync(filp, 0);
 		if (err < 0)
 			ret = err;
 	}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 099b3518feea..581d8f081e68 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -413,10 +413,8 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
 		return 0;
 
 	/* Write all dirty data */
-	if (S_ISREG(inode->i_mode)) {
-		filemap_write_and_wait(inode->i_mapping);
+	if (S_ISREG(inode->i_mode))
 		nfs_wb_all(inode);
-	}
 
 	fattr = nfs_alloc_fattr();
 	if (fattr == NULL)
@@ -530,6 +528,68 @@ out:
 	return err;
 }
 
+static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
+{
+	atomic_set(&l_ctx->count, 1);
+	l_ctx->lockowner = current->files;
+	l_ctx->pid = current->tgid;
+	INIT_LIST_HEAD(&l_ctx->list);
+}
+
+static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
+{
+	struct nfs_lock_context *pos;
+
+	list_for_each_entry(pos, &ctx->lock_context.list, list) {
+		if (pos->lockowner != current->files)
+			continue;
+		if (pos->pid != current->tgid)
+			continue;
+		atomic_inc(&pos->count);
+		return pos;
+	}
+	return NULL;
+}
+
+struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
+{
+	struct nfs_lock_context *res, *new = NULL;
+	struct inode *inode = ctx->path.dentry->d_inode;
+
+	spin_lock(&inode->i_lock);
+	res = __nfs_find_lock_context(ctx);
+	if (res == NULL) {
+		spin_unlock(&inode->i_lock);
+		new = kmalloc(sizeof(*new), GFP_KERNEL);
+		if (new == NULL)
+			return NULL;
+		nfs_init_lock_context(new);
+		spin_lock(&inode->i_lock);
+		res = __nfs_find_lock_context(ctx);
+		if (res == NULL) {
+			list_add_tail(&new->list, &ctx->lock_context.list);
+			new->open_context = ctx;
+			res = new;
+			new = NULL;
+		}
+	}
+	spin_unlock(&inode->i_lock);
+	kfree(new);
+	return res;
+}
+
+void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
+{
+	struct nfs_open_context *ctx = l_ctx->open_context;
+	struct inode *inode = ctx->path.dentry->d_inode;
+
+	if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+		return;
+	list_del(&l_ctx->list);
+	spin_unlock(&inode->i_lock);
+	kfree(l_ctx);
+}
+
 /**
  * nfs_close_context - Common close_context() routine NFSv2/v3
  * @ctx: pointer to context
@@ -566,11 +626,11 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
 		path_get(&ctx->path);
 		ctx->cred = get_rpccred(cred);
 		ctx->state = NULL;
-		ctx->lockowner = current->files;
 		ctx->flags = 0;
 		ctx->error = 0;
 		ctx->dir_cookie = 0;
-		atomic_set(&ctx->count, 1);
+		nfs_init_lock_context(&ctx->lock_context);
+		ctx->lock_context.open_context = ctx;
 	}
 	return ctx;
 }
@@ -578,7 +638,7 @@ static struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct
 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
 {
 	if (ctx != NULL)
-		atomic_inc(&ctx->count);
+		atomic_inc(&ctx->lock_context.count);
 	return ctx;
 }
 
@@ -586,7 +646,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
 {
 	struct inode *inode = ctx->path.dentry->d_inode;
 
-	if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
+	if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
 		return;
 	list_del(&ctx->list);
 	spin_unlock(&inode->i_lock);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index d8bd619e386c..4c2150d86714 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -205,7 +205,8 @@ extern struct rpc_procinfo nfs4_procedures[];
 void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+					int nr_to_scan, gfp_t gfp_mask);
 
 /* inode.c */
 extern struct workqueue_struct *nfsiod_workqueue;
@@ -369,10 +370,9 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
  * Helper for restarting RPC calls in the possible presence of NFSv4.1
  * sessions.
  */
-static inline void nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
+static inline int nfs_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
 {
 	if (nfs4_has_session(clp))
-		rpc_restart_call_prepare(task);
-	else
-		rpc_restart_call(task);
+		return rpc_restart_call_prepare(task);
+	return rpc_restart_call(task);
 }
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 81cf14257916..db8846a0e82e 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -233,7 +233,7 @@ nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs
 static int
 nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 	u32 offset = (u32)args->offset;
 	u32 count = args->count;
@@ -393,8 +393,7 @@ nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *arg
 static int
 nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
 {
-	struct rpc_task *task = req->rq_task;
-	struct rpc_auth *auth = task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 	u32 count = args->count;
 
@@ -575,7 +574,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
 static int
 nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 
 	p = xdr_encode_fhandle(p, args->fh);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 75dcfc7da365..9769704f8ce6 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -330,7 +330,7 @@ nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *arg
 static int
 nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 	u32 count = args->count;
 
@@ -471,7 +471,7 @@ nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
 static int
 nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 	u32 count = args->count;
 
@@ -675,7 +675,7 @@ static int
 nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
 		    struct nfs3_getaclargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 
 	p = xdr_encode_fhandle(p, args->fh);
@@ -802,7 +802,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
 static int
 nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
 {
-	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+	struct rpc_auth *auth = req->rq_cred->cr_auth;
 	unsigned int replen;
 
 	p = xdr_encode_fhandle(p, args->fh);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c538c6106e16..311e15cc8af0 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -45,10 +45,29 @@ enum nfs4_client_state {
 	NFS4CLNT_RECLAIM_NOGRACE,
 	NFS4CLNT_DELEGRETURN,
 	NFS4CLNT_SESSION_RESET,
-	NFS4CLNT_SESSION_DRAINING,
 	NFS4CLNT_RECALL_SLOT,
 };
 
+enum nfs4_session_state {
+	NFS4_SESSION_INITING,
+	NFS4_SESSION_DRAINING,
+};
+
+struct nfs4_minor_version_ops {
+	u32	minor_version;
+
+	int	(*call_sync)(struct nfs_server *server,
+			struct rpc_message *msg,
+			struct nfs4_sequence_args *args,
+			struct nfs4_sequence_res *res,
+			int cache_reply);
+	int	(*validate_stateid)(struct nfs_delegation *,
+			const nfs4_stateid *);
+	const struct nfs4_state_recovery_ops *reboot_recovery_ops;
+	const struct nfs4_state_recovery_ops *nograce_recovery_ops;
+	const struct nfs4_state_maintenance_ops *state_renewal_ops;
+};
+
 /*
  * struct rpc_sequence ensures that RPC calls are sent in the exact
  * order that they appear on the list.
@@ -89,7 +108,6 @@ struct nfs_unique_id {
  */
 struct nfs4_state_owner {
 	struct nfs_unique_id so_owner_id;
-	struct nfs_client *so_client;
 	struct nfs_server *so_server;
 	struct rb_node so_client_node;
 
@@ -99,7 +117,6 @@ struct nfs4_state_owner {
 	atomic_t so_count;
 	unsigned long so_flags;
 	struct list_head so_states;
-	struct list_head so_delegations;
 	struct nfs_seqid_counter so_seqid;
 	struct rpc_sequence so_sequence;
 };
@@ -125,10 +142,20 @@ enum {
  * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
  */
 
+struct nfs4_lock_owner {
+	unsigned int lo_type;
+#define NFS4_ANY_LOCK_TYPE (0U)
+#define NFS4_FLOCK_LOCK_TYPE (1U << 0)
+#define NFS4_POSIX_LOCK_TYPE (1U << 1)
+	union {
+		fl_owner_t posix_owner;
+		pid_t flock_owner;
+	} lo_u;
+};
+
 struct nfs4_lock_state {
 	struct list_head ls_locks;	/* Other lock stateids */
 	struct nfs4_state *ls_state;	/* Pointer to open state */
-	fl_owner_t ls_owner;	/* POSIX lock owner */
 #define NFS_LOCK_INITIALIZED 1
 	int ls_flags;
 	struct nfs_seqid_counter ls_seqid;
@@ -136,6 +163,7 @@ struct nfs4_lock_state {
 	struct nfs_unique_id ls_id;
 	nfs4_stateid ls_stateid;
 	atomic_t ls_count;
+	struct nfs4_lock_owner ls_owner;
 };
 
 /* bits for nfs4_state->flags */
@@ -219,11 +247,15 @@ extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nam
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page);
+extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
 
-extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
-extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
 #if defined(CONFIG_NFS_V4_1)
-extern int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+	return server->nfs_client->cl_session;
+}
+
+extern int nfs4_setup_sequence(const struct nfs_server *server,
 			struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 			int cache_reply, struct rpc_task *task);
 extern void nfs4_destroy_session(struct nfs4_session *session);
@@ -234,7 +266,12 @@ extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
 			struct nfs_fsinfo *fsinfo);
 #else /* CONFIG_NFS_v4_1 */
-static inline int nfs4_setup_sequence(struct nfs_client *clp,
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+	return NULL;
+}
+
+static inline int nfs4_setup_sequence(const struct nfs_server *server,
 			struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 			int cache_reply, struct rpc_task *task)
 {
@@ -247,7 +284,7 @@ static inline int nfs4_init_session(struct nfs_server *server)
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
+extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
 
 extern const u32 nfs4_fattr_bitmap[2];
 extern const u32 nfs4_statfs_bitmap[2];
@@ -284,7 +321,7 @@ extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 extern void nfs41_handle_recall_slot(struct nfs_client *clp);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
+extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
 
 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
 extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 70015dd60a98..7ffbb98ddec3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -303,15 +303,19 @@ do_state_recovery:
 }
 
 
-static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
+static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
 {
-	struct nfs_client *clp = server->nfs_client;
 	spin_lock(&clp->cl_lock);
 	if (time_before(clp->cl_last_renewal,timestamp))
 		clp->cl_last_renewal = timestamp;
 	spin_unlock(&clp->cl_lock);
 }
 
+static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
+{
+	do_renew_lease(server->nfs_client, timestamp);
+}
+
 #if defined(CONFIG_NFS_V4_1)
 
 /*
@@ -356,7 +360,7 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
 {
 	struct rpc_task *task;
 
-	if (!test_bit(NFS4CLNT_SESSION_DRAINING, &ses->clp->cl_state)) {
+	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
 		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
 		if (task)
 			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
@@ -370,12 +374,11 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
 	complete(&ses->complete);
 }
 
-static void nfs41_sequence_free_slot(const struct nfs_client *clp,
-				     struct nfs4_sequence_res *res)
+static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
 {
 	struct nfs4_slot_table *tbl;
 
-	tbl = &clp->cl_session->fc_slot_table;
+	tbl = &res->sr_session->fc_slot_table;
 	if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
 		/* just wake up the next guy waiting since
 		 * we may have not consumed a slot after all */
@@ -385,18 +388,17 @@ static void nfs41_sequence_free_slot(const struct nfs_client *clp,
 
 	spin_lock(&tbl->slot_tbl_lock);
 	nfs4_free_slot(tbl, res->sr_slotid);
-	nfs41_check_drain_session_complete(clp->cl_session);
+	nfs41_check_drain_session_complete(res->sr_session);
 	spin_unlock(&tbl->slot_tbl_lock);
 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
 }
 
-static void nfs41_sequence_done(struct nfs_client *clp,
-				struct nfs4_sequence_res *res,
-				int rpc_status)
+static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
 {
 	unsigned long timestamp;
 	struct nfs4_slot_table *tbl;
 	struct nfs4_slot *slot;
+	struct nfs_client *clp;
 
 	/*
 	 * sr_status remains 1 if an RPC level error occurred. The server
@@ -411,25 +413,51 @@ static void nfs41_sequence_done(struct nfs_client *clp,
 	if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
 		goto out;
 
+	tbl = &res->sr_session->fc_slot_table;
+	slot = tbl->slots + res->sr_slotid;
+
 	/* Check the SEQUENCE operation status */
-	if (res->sr_status == 0) {
-		tbl = &clp->cl_session->fc_slot_table;
-		slot = tbl->slots + res->sr_slotid;
+	switch (res->sr_status) {
+	case 0:
 		/* Update the slot's sequence and clientid lease timer */
 		++slot->seq_nr;
 		timestamp = res->sr_renewal_time;
-		spin_lock(&clp->cl_lock);
-		if (time_before(clp->cl_last_renewal, timestamp))
-			clp->cl_last_renewal = timestamp;
-		spin_unlock(&clp->cl_lock);
+		clp = res->sr_session->clp;
+		do_renew_lease(clp, timestamp);
 		/* Check sequence flags */
 		if (atomic_read(&clp->cl_count) > 1)
 			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+		break;
+	case -NFS4ERR_DELAY:
+		/* The server detected a resend of the RPC call and
+		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
+		 * of RFC5661.
+		 */
+		dprintk("%s: slot=%d seq=%d: Operation in progress\n",
+				__func__, res->sr_slotid, slot->seq_nr);
+		goto out_retry;
+	default:
+		/* Just update the slot sequence no. */
+		++slot->seq_nr;
 	}
 out:
 	/* The session may be reset by one of the error handlers. */
 	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
-	nfs41_sequence_free_slot(clp, res);
+	nfs41_sequence_free_slot(res);
+	return 1;
+out_retry:
+	if (!rpc_restart_call(task))
+		goto out;
+	rpc_delay(task, NFS4_POLL_RETRY_MAX);
+	return 0;
+}
+
+static int nfs4_sequence_done(struct rpc_task *task,
+			       struct nfs4_sequence_res *res)
+{
+	if (res->sr_session == NULL)
+		return 1;
+	return nfs41_sequence_done(task, res);
 }
 
 /*
@@ -480,12 +508,11 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
 	if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
 		return 0;
 
-	memset(res, 0, sizeof(*res));
 	res->sr_slotid = NFS4_MAX_SLOT_TABLE;
 	tbl = &session->fc_slot_table;
 
 	spin_lock(&tbl->slot_tbl_lock);
-	if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state) &&
+	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
 	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
 		/*
 		 * The state manager will wait until the slot table is empty.
@@ -525,6 +552,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
 	res->sr_session = session;
 	res->sr_slotid = slotid;
 	res->sr_renewal_time = jiffies;
+	res->sr_status_flags = 0;
 	/*
 	 * sr_status is only set in decode_sequence, and so will remain
 	 * set to 1 if an rpc level failure occurs.
@@ -533,33 +561,33 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
 	return 0;
 }
 
-int nfs4_setup_sequence(struct nfs_client *clp,
+int nfs4_setup_sequence(const struct nfs_server *server,
 			struct nfs4_sequence_args *args,
 			struct nfs4_sequence_res *res,
 			int cache_reply,
 			struct rpc_task *task)
 {
+	struct nfs4_session *session = nfs4_get_session(server);
 	int ret = 0;
 
+	if (session == NULL) {
+		args->sa_session = NULL;
+		res->sr_session = NULL;
+		goto out;
+	}
+
 	dprintk("--> %s clp %p session %p sr_slotid %d\n",
-		__func__, clp, clp->cl_session, res->sr_slotid);
+		__func__, session->clp, session, res->sr_slotid);
 
-	if (!nfs4_has_session(clp))
-		goto out;
-	ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
+	ret = nfs41_setup_sequence(session, args, res, cache_reply,
 			task);
-	if (ret && ret != -EAGAIN) {
-		/* terminate rpc task */
-		task->tk_status = ret;
-		task->tk_action = NULL;
-	}
 out:
 	dprintk("<-- %s status=%d\n", __func__, ret);
 	return ret;
 }
 
 struct nfs41_call_sync_data {
-	struct nfs_client *clp;
+	const struct nfs_server *seq_server;
 	struct nfs4_sequence_args *seq_args;
 	struct nfs4_sequence_res *seq_res;
 	int cache_reply;
@@ -569,9 +597,9 @@ static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs41_call_sync_data *data = calldata;
 
-	dprintk("--> %s data->clp->cl_session %p\n", __func__,
-		data->clp->cl_session);
-	if (nfs4_setup_sequence(data->clp, data->seq_args,
+	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
+
+	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
 				data->seq_res, data->cache_reply, task))
 		return;
 	rpc_call_start(task);
@@ -587,7 +615,7 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs41_call_sync_data *data = calldata;
 
-	nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+	nfs41_sequence_done(task, data->seq_res);
 }
 
 struct rpc_call_ops nfs41_call_sync_ops = {
@@ -600,8 +628,7 @@ struct rpc_call_ops nfs41_call_priv_sync_ops = {
 	.rpc_call_done = nfs41_call_sync_done,
 };
 
-static int nfs4_call_sync_sequence(struct nfs_client *clp,
-				   struct rpc_clnt *clnt,
+static int nfs4_call_sync_sequence(struct nfs_server *server,
 				   struct rpc_message *msg,
 				   struct nfs4_sequence_args *args,
 				   struct nfs4_sequence_res *res,
@@ -611,13 +638,13 @@ static int nfs4_call_sync_sequence(struct nfs_client *clp,
 	int ret;
 	struct rpc_task *task;
 	struct nfs41_call_sync_data data = {
-		.clp = clp,
+		.seq_server = server,
 		.seq_args = args,
 		.seq_res = res,
 		.cache_reply = cache_reply,
 	};
 	struct rpc_task_setup task_setup = {
-		.rpc_client = clnt,
+		.rpc_client = server->client,
 		.rpc_message = msg,
 		.callback_ops = &nfs41_call_sync_ops,
 		.callback_data = &data
@@ -642,10 +669,15 @@ int _nfs4_call_sync_session(struct nfs_server *server,
 		struct nfs4_sequence_res *res,
 		int cache_reply)
 {
-	return nfs4_call_sync_sequence(server->nfs_client, server->client,
-					msg, args, res, cache_reply, 0);
+	return nfs4_call_sync_sequence(server, msg, args, res, cache_reply, 0);
 }
 
+#else
+static int nfs4_sequence_done(struct rpc_task *task,
+			       struct nfs4_sequence_res *res)
+{
+	return 1;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 int _nfs4_call_sync(struct nfs_server *server,
@@ -659,18 +691,9 @@ int _nfs4_call_sync(struct nfs_server *server,
 }
 
 #define nfs4_call_sync(server, msg, args, res, cache_reply) \
-	(server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+	(server)->nfs_client->cl_mvops->call_sync((server), (msg), &(args)->seq_args, \
 			&(res)->seq_res, (cache_reply))
 
-static void nfs4_sequence_done(const struct nfs_server *server,
-			       struct nfs4_sequence_res *res, int rpc_status)
-{
-#ifdef CONFIG_NFS_V4_1
-	if (nfs4_has_session(server->nfs_client))
-		nfs41_sequence_done(server->nfs_client, res, rpc_status);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
 {
 	struct nfs_inode *nfsi = NFS_I(dir);
@@ -745,19 +768,14 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
745 p->o_arg.server = server; 768 p->o_arg.server = server;
746 p->o_arg.bitmask = server->attr_bitmask; 769 p->o_arg.bitmask = server->attr_bitmask;
747 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 770 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
748 if (flags & O_EXCL) { 771 if (flags & O_CREAT) {
749 if (nfs4_has_persistent_session(server->nfs_client)) { 772 u32 *s;
750 /* GUARDED */ 773
751 p->o_arg.u.attrs = &p->attrs;
752 memcpy(&p->attrs, attrs, sizeof(p->attrs));
753 } else { /* EXCLUSIVE4_1 */
754 u32 *s = (u32 *) p->o_arg.u.verifier.data;
755 s[0] = jiffies;
756 s[1] = current->pid;
757 }
758 } else if (flags & O_CREAT) {
759 p->o_arg.u.attrs = &p->attrs; 774 p->o_arg.u.attrs = &p->attrs;
760 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 775 memcpy(&p->attrs, attrs, sizeof(p->attrs));
776 s = (u32 *) p->o_arg.u.verifier.data;
777 s[0] = jiffies;
778 s[1] = current->pid;
761 } 779 }
762 p->c_arg.fh = &p->o_res.fh; 780 p->c_arg.fh = &p->o_res.fh;
763 p->c_arg.stateid = &p->o_res.stateid; 781 p->c_arg.stateid = &p->o_res.stateid;
@@ -1255,8 +1273,6 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1255 struct nfs4_opendata *data = calldata; 1273 struct nfs4_opendata *data = calldata;
1256 1274
1257 data->rpc_status = task->tk_status; 1275 data->rpc_status = task->tk_status;
1258 if (RPC_ASSASSINATED(task))
1259 return;
1260 if (data->rpc_status == 0) { 1276 if (data->rpc_status == 0) {
1261 memcpy(data->o_res.stateid.data, data->c_res.stateid.data, 1277 memcpy(data->o_res.stateid.data, data->c_res.stateid.data,
1262 sizeof(data->o_res.stateid.data)); 1278 sizeof(data->o_res.stateid.data));
@@ -1356,13 +1372,13 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1356 } 1372 }
1357 /* Update sequence id. */ 1373 /* Update sequence id. */
1358 data->o_arg.id = sp->so_owner_id.id; 1374 data->o_arg.id = sp->so_owner_id.id;
1359 data->o_arg.clientid = sp->so_client->cl_clientid; 1375 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1360 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { 1376 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1361 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1377 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1362 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1378 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1363 } 1379 }
1364 data->timestamp = jiffies; 1380 data->timestamp = jiffies;
1365 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 1381 if (nfs4_setup_sequence(data->o_arg.server,
1366 &data->o_arg.seq_args, 1382 &data->o_arg.seq_args,
1367 &data->o_res.seq_res, 1, task)) 1383 &data->o_res.seq_res, 1, task))
1368 return; 1384 return;
@@ -1385,11 +1401,9 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
1385 1401
1386 data->rpc_status = task->tk_status; 1402 data->rpc_status = task->tk_status;
1387 1403
1388 nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res, 1404 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1389 task->tk_status);
1390
1391 if (RPC_ASSASSINATED(task))
1392 return; 1405 return;
1406
1393 if (task->tk_status == 0) { 1407 if (task->tk_status == 0) {
1394 switch (data->o_res.f_attr->mode & S_IFMT) { 1408 switch (data->o_res.f_attr->mode & S_IFMT) {
1395 case S_IFREG: 1409 case S_IFREG:
@@ -1773,7 +1787,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1773 if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) { 1787 if (nfs4_copy_delegation_stateid(&arg.stateid, inode)) {
1774 /* Use that stateid */ 1788 /* Use that stateid */
1775 } else if (state != NULL) { 1789 } else if (state != NULL) {
1776 nfs4_copy_stateid(&arg.stateid, state, current->files); 1790 nfs4_copy_stateid(&arg.stateid, state, current->files, current->tgid);
1777 } else 1791 } else
1778 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid)); 1792 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
1779 1793
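The setattr path now derives its stateid from the lock-context owner pair (fl_owner plus tgid) rather than the open owner alone. The selection order is unchanged: delegation stateid first, then a matching lock or open stateid, then the anonymous all-zero stateid. A compact sketch of that order, with nfs4_select_setattr_stateid as an illustrative name only:

    /* Illustrative helper (not in this change) restating the selection above. */
    static void nfs4_select_setattr_stateid(nfs4_stateid *dst, struct inode *inode,
                                            struct nfs4_state *state)
    {
            if (nfs4_copy_delegation_stateid(dst, inode))
                    return;                          /* 1. cached write delegation */
            if (state != NULL) {
                    /* 2. lock stateid keyed by (files, tgid), else open stateid */
                    nfs4_copy_stateid(dst, state, current->files, current->tgid);
                    return;
            }
            memcpy(dst, &zero_stateid, sizeof(*dst)); /* 3. anonymous stateid */
    }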
@@ -1838,8 +1852,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1838 struct nfs4_state *state = calldata->state; 1852 struct nfs4_state *state = calldata->state;
1839 struct nfs_server *server = NFS_SERVER(calldata->inode); 1853 struct nfs_server *server = NFS_SERVER(calldata->inode);
1840 1854
1841 nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status); 1855 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
1842 if (RPC_ASSASSINATED(task))
1843 return; 1856 return;
1844 /* hmm. we are done with the inode, and in the process of freeing 1857 /* hmm. we are done with the inode, and in the process of freeing
1845 * the state_owner. we keep this around to process errors 1858 * the state_owner. we keep this around to process errors
@@ -1903,7 +1916,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
1903 1916
1904 nfs_fattr_init(calldata->res.fattr); 1917 nfs_fattr_init(calldata->res.fattr);
1905 calldata->timestamp = jiffies; 1918 calldata->timestamp = jiffies;
1906 if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client, 1919 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
1907 &calldata->arg.seq_args, &calldata->res.seq_res, 1920 &calldata->arg.seq_args, &calldata->res.seq_res,
1908 1, task)) 1921 1, task))
1909 return; 1922 return;
@@ -2648,7 +2661,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2648{ 2661{
2649 struct nfs_removeres *res = task->tk_msg.rpc_resp; 2662 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2650 2663
2651 nfs4_sequence_done(res->server, &res->seq_res, task->tk_status); 2664 if (!nfs4_sequence_done(task, &res->seq_res))
2665 return 0;
2652 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2666 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2653 return 0; 2667 return 0;
2654 update_changeattr(dir, &res->cinfo); 2668 update_changeattr(dir, &res->cinfo);
@@ -3093,7 +3107,8 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3093 3107
3094 dprintk("--> %s\n", __func__); 3108 dprintk("--> %s\n", __func__);
3095 3109
3096 nfs4_sequence_done(server, &data->res.seq_res, task->tk_status); 3110 if (!nfs4_sequence_done(task, &data->res.seq_res))
3111 return -EAGAIN;
3097 3112
3098 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3113 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3099 nfs_restart_rpc(task, server->nfs_client); 3114 nfs_restart_rpc(task, server->nfs_client);
@@ -3116,8 +3131,8 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3116{ 3131{
3117 struct inode *inode = data->inode; 3132 struct inode *inode = data->inode;
3118 3133
3119 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, 3134 if (!nfs4_sequence_done(task, &data->res.seq_res))
3120 task->tk_status); 3135 return -EAGAIN;
3121 3136
3122 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3137 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3123 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client); 3138 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
@@ -3145,8 +3160,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
3145{ 3160{
3146 struct inode *inode = data->inode; 3161 struct inode *inode = data->inode;
3147 3162
3148 nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res, 3163 if (!nfs4_sequence_done(task, &data->res.seq_res))
3149 task->tk_status); 3164 return -EAGAIN;
3165
3150 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3166 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3151 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client); 3167 nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
3152 return -EAGAIN; 3168 return -EAGAIN;
@@ -3196,10 +3212,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3196 nfs4_schedule_state_recovery(clp); 3212 nfs4_schedule_state_recovery(clp);
3197 return; 3213 return;
3198 } 3214 }
3199 spin_lock(&clp->cl_lock); 3215 do_renew_lease(clp, timestamp);
3200 if (time_before(clp->cl_last_renewal,timestamp))
3201 clp->cl_last_renewal = timestamp;
3202 spin_unlock(&clp->cl_lock);
3203} 3216}
3204 3217
3205static const struct rpc_call_ops nfs4_renew_ops = { 3218static const struct rpc_call_ops nfs4_renew_ops = {
@@ -3240,10 +3253,7 @@ int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3240 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3253 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3241 if (status < 0) 3254 if (status < 0)
3242 return status; 3255 return status;
3243 spin_lock(&clp->cl_lock); 3256 do_renew_lease(clp, now);
3244 if (time_before(clp->cl_last_renewal,now))
3245 clp->cl_last_renewal = now;
3246 spin_unlock(&clp->cl_lock);
3247 return 0; 3257 return 0;
3248} 3258}
3249 3259
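Both the asynchronous RENEW completion above and the synchronous nfs4_proc_renew() now record the lease time through do_renew_lease(). The helper itself is introduced elsewhere in this patch; a sketch reconstructed from the open-coded logic it replaces (an assumption, not a quote):

    /* Assumed body of do_renew_lease(), inferred from the removed lines. */
    static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
    {
            spin_lock(&clp->cl_lock);
            if (time_before(clp->cl_last_renewal, timestamp))
                    clp->cl_last_renewal = timestamp;
            spin_unlock(&clp->cl_lock);
    }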
@@ -3464,9 +3474,11 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
3464} 3474}
3465 3475
3466static int 3476static int
3467_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state) 3477nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3468{ 3478{
3469 if (!clp || task->tk_status >= 0) 3479 struct nfs_client *clp = server->nfs_client;
3480
3481 if (task->tk_status >= 0)
3470 return 0; 3482 return 0;
3471 switch(task->tk_status) { 3483 switch(task->tk_status) {
3472 case -NFS4ERR_ADMIN_REVOKED: 3484 case -NFS4ERR_ADMIN_REVOKED:
@@ -3498,8 +3510,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3498 return -EAGAIN; 3510 return -EAGAIN;
3499#endif /* CONFIG_NFS_V4_1 */ 3511#endif /* CONFIG_NFS_V4_1 */
3500 case -NFS4ERR_DELAY: 3512 case -NFS4ERR_DELAY:
3501 if (server) 3513 nfs_inc_server_stats(server, NFSIOS_DELAY);
3502 nfs_inc_server_stats(server, NFSIOS_DELAY);
3503 case -NFS4ERR_GRACE: 3514 case -NFS4ERR_GRACE:
3504 case -EKEYEXPIRED: 3515 case -EKEYEXPIRED:
3505 rpc_delay(task, NFS4_POLL_RETRY_MAX); 3516 rpc_delay(task, NFS4_POLL_RETRY_MAX);
@@ -3520,12 +3531,6 @@ do_state_recovery:
3520 return -EAGAIN; 3531 return -EAGAIN;
3521} 3532}
3522 3533
3523static int
3524nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3525{
3526 return _nfs4_async_handle_error(task, server, server->nfs_client, state);
3527}
3528
3529int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 3534int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3530 unsigned short port, struct rpc_cred *cred, 3535 unsigned short port, struct rpc_cred *cred,
3531 struct nfs4_setclientid_res *res) 3536 struct nfs4_setclientid_res *res)
@@ -3641,8 +3646,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
3641{ 3646{
3642 struct nfs4_delegreturndata *data = calldata; 3647 struct nfs4_delegreturndata *data = calldata;
3643 3648
3644 nfs4_sequence_done(data->res.server, &data->res.seq_res, 3649 if (!nfs4_sequence_done(task, &data->res.seq_res))
3645 task->tk_status); 3650 return;
3646 3651
3647 switch (task->tk_status) { 3652 switch (task->tk_status) {
3648 case -NFS4ERR_STALE_STATEID: 3653 case -NFS4ERR_STALE_STATEID:
@@ -3672,7 +3677,7 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
3672 3677
3673 d_data = (struct nfs4_delegreturndata *)data; 3678 d_data = (struct nfs4_delegreturndata *)data;
3674 3679
3675 if (nfs4_setup_sequence(d_data->res.server->nfs_client, 3680 if (nfs4_setup_sequence(d_data->res.server,
3676 &d_data->args.seq_args, 3681 &d_data->args.seq_args,
3677 &d_data->res.seq_res, 1, task)) 3682 &d_data->res.seq_res, 1, task))
3678 return; 3683 return;
@@ -3892,9 +3897,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
3892{ 3897{
3893 struct nfs4_unlockdata *calldata = data; 3898 struct nfs4_unlockdata *calldata = data;
3894 3899
3895 nfs4_sequence_done(calldata->server, &calldata->res.seq_res, 3900 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3896 task->tk_status);
3897 if (RPC_ASSASSINATED(task))
3898 return; 3901 return;
3899 switch (task->tk_status) { 3902 switch (task->tk_status) {
3900 case 0: 3903 case 0:
@@ -3927,7 +3930,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
3927 return; 3930 return;
3928 } 3931 }
3929 calldata->timestamp = jiffies; 3932 calldata->timestamp = jiffies;
3930 if (nfs4_setup_sequence(calldata->server->nfs_client, 3933 if (nfs4_setup_sequence(calldata->server,
3931 &calldata->arg.seq_args, 3934 &calldata->arg.seq_args,
3932 &calldata->res.seq_res, 1, task)) 3935 &calldata->res.seq_res, 1, task))
3933 return; 3936 return;
@@ -4082,7 +4085,8 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4082 } else 4085 } else
4083 data->arg.new_lock_owner = 0; 4086 data->arg.new_lock_owner = 0;
4084 data->timestamp = jiffies; 4087 data->timestamp = jiffies;
4085 if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args, 4088 if (nfs4_setup_sequence(data->server,
4089 &data->arg.seq_args,
4086 &data->res.seq_res, 1, task)) 4090 &data->res.seq_res, 1, task))
4087 return; 4091 return;
4088 rpc_call_start(task); 4092 rpc_call_start(task);
@@ -4101,12 +4105,10 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4101 4105
4102 dprintk("%s: begin!\n", __func__); 4106 dprintk("%s: begin!\n", __func__);
4103 4107
4104 nfs4_sequence_done(data->server, &data->res.seq_res, 4108 if (!nfs4_sequence_done(task, &data->res.seq_res))
4105 task->tk_status); 4109 return;
4106 4110
4107 data->rpc_status = task->tk_status; 4111 data->rpc_status = task->tk_status;
4108 if (RPC_ASSASSINATED(task))
4109 goto out;
4110 if (data->arg.new_lock_owner != 0) { 4112 if (data->arg.new_lock_owner != 0) {
4111 if (data->rpc_status == 0) 4113 if (data->rpc_status == 0)
4112 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4114 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
@@ -4424,6 +4426,34 @@ out:
4424 return err; 4426 return err;
4425} 4427}
4426 4428
4429static void nfs4_release_lockowner_release(void *calldata)
4430{
4431 kfree(calldata);
4432}
4433
4434const struct rpc_call_ops nfs4_release_lockowner_ops = {
4435 .rpc_release = nfs4_release_lockowner_release,
4436};
4437
4438void nfs4_release_lockowner(const struct nfs4_lock_state *lsp)
4439{
4440 struct nfs_server *server = lsp->ls_state->owner->so_server;
4441 struct nfs_release_lockowner_args *args;
4442 struct rpc_message msg = {
4443 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4444 };
4445
4446 if (server->nfs_client->cl_mvops->minor_version != 0)
4447 return;
4448 args = kmalloc(sizeof(*args), GFP_NOFS);
4449 if (!args)
4450 return;
4451 args->lock_owner.clientid = server->nfs_client->cl_clientid;
4452 args->lock_owner.id = lsp->ls_id.id;
4453 msg.rpc_argp = args;
4454 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
4455}
4456
4427#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 4457#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4428 4458
4429int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf, 4459int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
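nfs4_release_lockowner() is a fire-and-forget RELEASE_LOCKOWNER compound: it is only sent for minor version 0, allocation failure is silently ignored, and the args buffer doubles as the callback data so .rpc_release can kfree() it. The argument struct is declared in the shared XDR headers rather than in this file; its assumed shape, inferred from the field accesses above:

    /* Assumed layout of the new RPC argument (defined outside this excerpt). */
    struct nfs_release_lockowner_args {
            struct nfs_lowner       lock_owner;     /* clientid + 64-bit owner id */
    };

It is invoked from nfs4_put_lock_state() in the nfs4state.c hunk further down, and only once the lock state has actually been established on the server (NFS_LOCK_INITIALIZED).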
@@ -4611,7 +4641,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
4611 (struct nfs4_get_lease_time_data *)calldata; 4641 (struct nfs4_get_lease_time_data *)calldata;
4612 4642
4613 dprintk("--> %s\n", __func__); 4643 dprintk("--> %s\n", __func__);
4614 nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status); 4644 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
4645 return;
4615 switch (task->tk_status) { 4646 switch (task->tk_status) {
4616 case -NFS4ERR_DELAY: 4647 case -NFS4ERR_DELAY:
4617 case -NFS4ERR_GRACE: 4648 case -NFS4ERR_GRACE:
@@ -4805,13 +4836,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
4805 if (!session) 4836 if (!session)
4806 return NULL; 4837 return NULL;
4807 4838
4808 /*
4809 * The create session reply races with the server back
4810 * channel probe. Mark the client NFS_CS_SESSION_INITING
4811 * so that the client back channel can find the
4812 * nfs_client struct
4813 */
4814 clp->cl_cons_state = NFS_CS_SESSION_INITING;
4815 init_completion(&session->complete); 4839 init_completion(&session->complete);
4816 4840
4817 tbl = &session->fc_slot_table; 4841 tbl = &session->fc_slot_table;
@@ -4824,6 +4848,8 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
4824 spin_lock_init(&tbl->slot_tbl_lock); 4848 spin_lock_init(&tbl->slot_tbl_lock);
4825 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); 4849 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
4826 4850
4851 session->session_state = 1<<NFS4_SESSION_INITING;
4852
4827 session->clp = clp; 4853 session->clp = clp;
4828 return session; 4854 return session;
4829} 4855}
@@ -5040,6 +5066,10 @@ int nfs4_init_session(struct nfs_server *server)
5040 if (!nfs4_has_session(clp)) 5066 if (!nfs4_has_session(clp))
5041 return 0; 5067 return 0;
5042 5068
5069 session = clp->cl_session;
5070 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
5071 return 0;
5072
5043 rsize = server->rsize; 5073 rsize = server->rsize;
5044 if (rsize == 0) 5074 if (rsize == 0)
5045 rsize = NFS_MAX_FILE_IO_SIZE; 5075 rsize = NFS_MAX_FILE_IO_SIZE;
@@ -5047,7 +5077,6 @@ int nfs4_init_session(struct nfs_server *server)
5047 if (wsize == 0) 5077 if (wsize == 0)
5048 wsize = NFS_MAX_FILE_IO_SIZE; 5078 wsize = NFS_MAX_FILE_IO_SIZE;
5049 5079
5050 session = clp->cl_session;
5051 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; 5080 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5052 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; 5081 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5053 5082
@@ -5060,69 +5089,70 @@ int nfs4_init_session(struct nfs_server *server)
5060/* 5089/*
5061 * Renew the cl_session lease. 5090 * Renew the cl_session lease.
5062 */ 5091 */
5063static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5092struct nfs4_sequence_data {
5064{ 5093 struct nfs_client *clp;
5065 struct nfs4_sequence_args args; 5094 struct nfs4_sequence_args args;
5066 struct nfs4_sequence_res res; 5095 struct nfs4_sequence_res res;
5067 5096};
5068 struct rpc_message msg = {
5069 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5070 .rpc_argp = &args,
5071 .rpc_resp = &res,
5072 .rpc_cred = cred,
5073 };
5074
5075 args.sa_cache_this = 0;
5076
5077 return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
5078 &res, args.sa_cache_this, 1);
5079}
5080 5097
5081static void nfs41_sequence_release(void *data) 5098static void nfs41_sequence_release(void *data)
5082{ 5099{
5083 struct nfs_client *clp = (struct nfs_client *)data; 5100 struct nfs4_sequence_data *calldata = data;
5101 struct nfs_client *clp = calldata->clp;
5084 5102
5085 if (atomic_read(&clp->cl_count) > 1) 5103 if (atomic_read(&clp->cl_count) > 1)
5086 nfs4_schedule_state_renewal(clp); 5104 nfs4_schedule_state_renewal(clp);
5087 nfs_put_client(clp); 5105 nfs_put_client(clp);
5106 kfree(calldata);
5107}
5108
5109static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5110{
5111 switch(task->tk_status) {
5112 case -NFS4ERR_DELAY:
5113 case -EKEYEXPIRED:
5114 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5115 return -EAGAIN;
5116 default:
5117 nfs4_schedule_state_recovery(clp);
5118 }
5119 return 0;
5088} 5120}
5089 5121
5090static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5122static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5091{ 5123{
5092 struct nfs_client *clp = (struct nfs_client *)data; 5124 struct nfs4_sequence_data *calldata = data;
5125 struct nfs_client *clp = calldata->clp;
5093 5126
5094 nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status); 5127 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5128 return;
5095 5129
5096 if (task->tk_status < 0) { 5130 if (task->tk_status < 0) {
5097 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5131 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5098 if (atomic_read(&clp->cl_count) == 1) 5132 if (atomic_read(&clp->cl_count) == 1)
5099 goto out; 5133 goto out;
5100 5134
5101 if (_nfs4_async_handle_error(task, NULL, clp, NULL) 5135 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5102 == -EAGAIN) { 5136 rpc_restart_call_prepare(task);
5103 nfs_restart_rpc(task, clp);
5104 return; 5137 return;
5105 } 5138 }
5106 } 5139 }
5107 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5140 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5108out: 5141out:
5109 kfree(task->tk_msg.rpc_argp);
5110 kfree(task->tk_msg.rpc_resp);
5111
5112 dprintk("<-- %s\n", __func__); 5142 dprintk("<-- %s\n", __func__);
5113} 5143}
5114 5144
5115static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 5145static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5116{ 5146{
5117 struct nfs_client *clp; 5147 struct nfs4_sequence_data *calldata = data;
5148 struct nfs_client *clp = calldata->clp;
5118 struct nfs4_sequence_args *args; 5149 struct nfs4_sequence_args *args;
5119 struct nfs4_sequence_res *res; 5150 struct nfs4_sequence_res *res;
5120 5151
5121 clp = (struct nfs_client *)data;
5122 args = task->tk_msg.rpc_argp; 5152 args = task->tk_msg.rpc_argp;
5123 res = task->tk_msg.rpc_resp; 5153 res = task->tk_msg.rpc_resp;
5124 5154
5125 if (nfs4_setup_sequence(clp, args, res, 0, task)) 5155 if (nfs41_setup_sequence(clp->cl_session, args, res, 0, task))
5126 return; 5156 return;
5127 rpc_call_start(task); 5157 rpc_call_start(task);
5128} 5158}
@@ -5133,32 +5163,67 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
5133 .rpc_release = nfs41_sequence_release, 5163 .rpc_release = nfs41_sequence_release,
5134}; 5164};
5135 5165
5136static int nfs41_proc_async_sequence(struct nfs_client *clp, 5166static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5137 struct rpc_cred *cred)
5138{ 5167{
5139 struct nfs4_sequence_args *args; 5168 struct nfs4_sequence_data *calldata;
5140 struct nfs4_sequence_res *res;
5141 struct rpc_message msg = { 5169 struct rpc_message msg = {
5142 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 5170 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5143 .rpc_cred = cred, 5171 .rpc_cred = cred,
5144 }; 5172 };
5173 struct rpc_task_setup task_setup_data = {
5174 .rpc_client = clp->cl_rpcclient,
5175 .rpc_message = &msg,
5176 .callback_ops = &nfs41_sequence_ops,
5177 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5178 };
5145 5179
5146 if (!atomic_inc_not_zero(&clp->cl_count)) 5180 if (!atomic_inc_not_zero(&clp->cl_count))
5147 return -EIO; 5181 return ERR_PTR(-EIO);
5148 args = kzalloc(sizeof(*args), GFP_NOFS); 5182 calldata = kmalloc(sizeof(*calldata), GFP_NOFS);
5149 res = kzalloc(sizeof(*res), GFP_NOFS); 5183 if (calldata == NULL) {
5150 if (!args || !res) {
5151 kfree(args);
5152 kfree(res);
5153 nfs_put_client(clp); 5184 nfs_put_client(clp);
5154 return -ENOMEM; 5185 return ERR_PTR(-ENOMEM);
5155 } 5186 }
5156 res->sr_slotid = NFS4_MAX_SLOT_TABLE; 5187 calldata->res.sr_slotid = NFS4_MAX_SLOT_TABLE;
5157 msg.rpc_argp = args; 5188 msg.rpc_argp = &calldata->args;
5158 msg.rpc_resp = res; 5189 msg.rpc_resp = &calldata->res;
5190 calldata->clp = clp;
5191 task_setup_data.callback_data = calldata;
5159 5192
5160 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 5193 return rpc_run_task(&task_setup_data);
5161 &nfs41_sequence_ops, (void *)clp); 5194}
5195
5196static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5197{
5198 struct rpc_task *task;
5199 int ret = 0;
5200
5201 task = _nfs41_proc_sequence(clp, cred);
5202 if (IS_ERR(task))
5203 ret = PTR_ERR(task);
5204 else
5205 rpc_put_task(task);
5206 dprintk("<-- %s status=%d\n", __func__, ret);
5207 return ret;
5208}
5209
5210static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5211{
5212 struct rpc_task *task;
5213 int ret;
5214
5215 task = _nfs41_proc_sequence(clp, cred);
5216 if (IS_ERR(task)) {
5217 ret = PTR_ERR(task);
5218 goto out;
5219 }
5220 ret = rpc_wait_for_completion_task(task);
5221 if (!ret)
5222 ret = task->tk_status;
5223 rpc_put_task(task);
5224out:
5225 dprintk("<-- %s status=%d\n", __func__, ret);
5226 return ret;
5162} 5227}
5163 5228
5164struct nfs4_reclaim_complete_data { 5229struct nfs4_reclaim_complete_data {
@@ -5172,13 +5237,31 @@ static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
5172 struct nfs4_reclaim_complete_data *calldata = data; 5237 struct nfs4_reclaim_complete_data *calldata = data;
5173 5238
5174 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 5239 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5175 if (nfs4_setup_sequence(calldata->clp, &calldata->arg.seq_args, 5240 if (nfs41_setup_sequence(calldata->clp->cl_session,
5241 &calldata->arg.seq_args,
5176 &calldata->res.seq_res, 0, task)) 5242 &calldata->res.seq_res, 0, task))
5177 return; 5243 return;
5178 5244
5179 rpc_call_start(task); 5245 rpc_call_start(task);
5180} 5246}
5181 5247
5248static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5249{
5250 switch(task->tk_status) {
5251 case 0:
5252 case -NFS4ERR_COMPLETE_ALREADY:
5253 case -NFS4ERR_WRONG_CRED: /* What to do here? */
5254 break;
5255 case -NFS4ERR_DELAY:
5256 case -EKEYEXPIRED:
5257 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5258 return -EAGAIN;
5259 default:
5260 nfs4_schedule_state_recovery(clp);
5261 }
5262 return 0;
5263}
5264
5182static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 5265static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5183{ 5266{
5184 struct nfs4_reclaim_complete_data *calldata = data; 5267 struct nfs4_reclaim_complete_data *calldata = data;
@@ -5186,32 +5269,13 @@ static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
5186 struct nfs4_sequence_res *res = &calldata->res.seq_res; 5269 struct nfs4_sequence_res *res = &calldata->res.seq_res;
5187 5270
5188 dprintk("--> %s\n", __func__); 5271 dprintk("--> %s\n", __func__);
5189 nfs41_sequence_done(clp, res, task->tk_status); 5272 if (!nfs41_sequence_done(task, res))
5190 switch (task->tk_status) { 5273 return;
5191 case 0:
5192 case -NFS4ERR_COMPLETE_ALREADY:
5193 break;
5194 case -NFS4ERR_BADSESSION:
5195 case -NFS4ERR_DEADSESSION:
5196 /*
5197 * Handle the session error, but do not retry the operation, as
5198 * we have no way of telling whether the clientid had to be
5199 * reset before we got our reply. If reset, a new wave of
5200 * reclaim operations will follow, containing their own reclaim
5201 * complete. We don't want our retry to get on the way of
5202 * recovery by incorrectly indicating to the server that we're
5203 * done reclaiming state since the process had to be restarted.
5204 */
5205 _nfs4_async_handle_error(task, NULL, clp, NULL);
5206 break;
5207 default:
5208 if (_nfs4_async_handle_error(
5209 task, NULL, clp, NULL) == -EAGAIN) {
5210 rpc_restart_call_prepare(task);
5211 return;
5212 }
5213 }
5214 5274
5275 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
5276 rpc_restart_call_prepare(task);
5277 return;
5278 }
5215 dprintk("<-- %s\n", __func__); 5279 dprintk("<-- %s\n", __func__);
5216} 5280}
5217 5281
@@ -5325,28 +5389,30 @@ struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
5325}; 5389};
5326#endif 5390#endif
5327 5391
5328/* 5392static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
5329 * Per minor version reboot and network partition recovery ops 5393 .minor_version = 0,
5330 */ 5394 .call_sync = _nfs4_call_sync,
5331 5395 .validate_stateid = nfs4_validate_delegation_stateid,
5332struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = { 5396 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
5333 &nfs40_reboot_recovery_ops, 5397 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
5334#if defined(CONFIG_NFS_V4_1) 5398 .state_renewal_ops = &nfs40_state_renewal_ops,
5335 &nfs41_reboot_recovery_ops,
5336#endif
5337}; 5399};
5338 5400
5339struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
5340 &nfs40_nograce_recovery_ops,
5341#if defined(CONFIG_NFS_V4_1) 5401#if defined(CONFIG_NFS_V4_1)
5342 &nfs41_nograce_recovery_ops, 5402static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
5343#endif 5403 .minor_version = 1,
5404 .call_sync = _nfs4_call_sync_session,
5405 .validate_stateid = nfs41_validate_delegation_stateid,
5406 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
5407 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
5408 .state_renewal_ops = &nfs41_state_renewal_ops,
5344}; 5409};
5410#endif
5345 5411
5346struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = { 5412const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
5347 &nfs40_state_renewal_ops, 5413 [0] = &nfs_v4_0_minor_ops,
5348#if defined(CONFIG_NFS_V4_1) 5414#if defined(CONFIG_NFS_V4_1)
5349 &nfs41_state_renewal_ops, 5415 [1] = &nfs_v4_1_minor_ops,
5350#endif 5416#endif
5351}; 5417};
5352 5418
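All of the per-minor-version arrays that used to be indexed by cl_minorversion are folded into a single ops table reached through clp->cl_mvops. The structure itself is declared in nfs4_fs.h and is not part of this excerpt; an assumed sketch, reconstructed from the two initializers above:

    /* Assumed definition of the minor-version ops table (see nfs4_fs.h). */
    struct nfs4_minor_version_ops {
            u32     minor_version;
            int     (*call_sync)(struct nfs_server *server,
                                 struct rpc_message *msg,
                                 struct nfs4_sequence_args *args,
                                 struct nfs4_sequence_res *res,
                                 int cache_reply);
            int     (*validate_stateid)(struct nfs_delegation *,
                                        const nfs4_stateid *);
            const struct nfs4_state_recovery_ops    *reboot_recovery_ops;
            const struct nfs4_state_recovery_ops    *nograce_recovery_ops;
            const struct nfs4_state_maintenance_ops *state_renewal_ops;
    };

Callers such as nfs4_renew_state() and the state manager below then pick the right implementation with a single pointer dereference instead of an array lookup.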
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index d87f10327b72..72b6c580af13 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -54,14 +54,14 @@
54void 54void
55nfs4_renew_state(struct work_struct *work) 55nfs4_renew_state(struct work_struct *work)
56{ 56{
57 struct nfs4_state_maintenance_ops *ops; 57 const struct nfs4_state_maintenance_ops *ops;
58 struct nfs_client *clp = 58 struct nfs_client *clp =
59 container_of(work, struct nfs_client, cl_renewd.work); 59 container_of(work, struct nfs_client, cl_renewd.work);
60 struct rpc_cred *cred; 60 struct rpc_cred *cred;
61 long lease; 61 long lease;
62 unsigned long last, now; 62 unsigned long last, now;
63 63
64 ops = nfs4_state_renewal_ops[clp->cl_minorversion]; 64 ops = clp->cl_mvops->state_renewal_ops;
65 dprintk("%s: start\n", __func__); 65 dprintk("%s: start\n", __func__);
66 /* Are there any active superblocks? */ 66 /* Are there any active superblocks? */
67 if (list_empty(&clp->cl_superblocks)) 67 if (list_empty(&clp->cl_superblocks))
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 34acf5926fdc..3e2f19b04c06 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -145,7 +145,9 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
145 struct nfs4_session *ses = clp->cl_session; 145 struct nfs4_session *ses = clp->cl_session;
146 int max_slots; 146 int max_slots;
147 147
148 if (test_and_clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) { 148 if (ses == NULL)
149 return;
150 if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
149 spin_lock(&ses->fc_slot_table.slot_tbl_lock); 151 spin_lock(&ses->fc_slot_table.slot_tbl_lock);
150 max_slots = ses->fc_slot_table.max_slots; 152 max_slots = ses->fc_slot_table.max_slots;
151 while (max_slots--) { 153 while (max_slots--) {
@@ -167,7 +169,7 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
167 struct nfs4_slot_table *tbl = &ses->fc_slot_table; 169 struct nfs4_slot_table *tbl = &ses->fc_slot_table;
168 170
169 spin_lock(&tbl->slot_tbl_lock); 171 spin_lock(&tbl->slot_tbl_lock);
170 set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state); 172 set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
171 if (tbl->highest_used_slotid != -1) { 173 if (tbl->highest_used_slotid != -1) {
172 INIT_COMPLETION(ses->complete); 174 INIT_COMPLETION(ses->complete);
173 spin_unlock(&tbl->slot_tbl_lock); 175 spin_unlock(&tbl->slot_tbl_lock);
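Session draining is now tracked per struct nfs4_session (session_state) instead of in the client-wide cl_state bitmap, and nfs4_end_drain_session() tolerates a NULL session so NFSv4.0 mounts can call it unconditionally. The state bits live in nfs4_fs.h; an assumed sketch (bit values illustrative):

    /* Assumed per-session state bits used above and in nfs4_init_session(). */
    enum nfs4_session_state {
            NFS4_SESSION_INITING,   /* fore-channel attrs still to be negotiated */
            NFS4_SESSION_DRAINING,  /* park new requests until all slots drain */
    };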
@@ -371,7 +373,6 @@ nfs4_alloc_state_owner(void)
371 return NULL; 373 return NULL;
372 spin_lock_init(&sp->so_lock); 374 spin_lock_init(&sp->so_lock);
373 INIT_LIST_HEAD(&sp->so_states); 375 INIT_LIST_HEAD(&sp->so_states);
374 INIT_LIST_HEAD(&sp->so_delegations);
375 rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); 376 rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
376 sp->so_seqid.sequence = &sp->so_sequence; 377 sp->so_seqid.sequence = &sp->so_sequence;
377 spin_lock_init(&sp->so_sequence.lock); 378 spin_lock_init(&sp->so_sequence.lock);
@@ -384,7 +385,7 @@ static void
384nfs4_drop_state_owner(struct nfs4_state_owner *sp) 385nfs4_drop_state_owner(struct nfs4_state_owner *sp)
385{ 386{
386 if (!RB_EMPTY_NODE(&sp->so_client_node)) { 387 if (!RB_EMPTY_NODE(&sp->so_client_node)) {
387 struct nfs_client *clp = sp->so_client; 388 struct nfs_client *clp = sp->so_server->nfs_client;
388 389
389 spin_lock(&clp->cl_lock); 390 spin_lock(&clp->cl_lock);
390 rb_erase(&sp->so_client_node, &clp->cl_state_owners); 391 rb_erase(&sp->so_client_node, &clp->cl_state_owners);
@@ -406,7 +407,6 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
406 new = nfs4_alloc_state_owner(); 407 new = nfs4_alloc_state_owner();
407 if (new == NULL) 408 if (new == NULL)
408 return NULL; 409 return NULL;
409 new->so_client = clp;
410 new->so_server = server; 410 new->so_server = server;
411 new->so_cred = cred; 411 new->so_cred = cred;
412 spin_lock(&clp->cl_lock); 412 spin_lock(&clp->cl_lock);
@@ -423,7 +423,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
423 423
424void nfs4_put_state_owner(struct nfs4_state_owner *sp) 424void nfs4_put_state_owner(struct nfs4_state_owner *sp)
425{ 425{
426 struct nfs_client *clp = sp->so_client; 426 struct nfs_client *clp = sp->so_server->nfs_client;
427 struct rpc_cred *cred = sp->so_cred; 427 struct rpc_cred *cred = sp->so_cred;
428 428
429 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) 429 if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
@@ -602,12 +602,21 @@ void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
602 * that is compatible with current->files 602 * that is compatible with current->files
603 */ 603 */
604static struct nfs4_lock_state * 604static struct nfs4_lock_state *
605__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) 605__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
606{ 606{
607 struct nfs4_lock_state *pos; 607 struct nfs4_lock_state *pos;
608 list_for_each_entry(pos, &state->lock_states, ls_locks) { 608 list_for_each_entry(pos, &state->lock_states, ls_locks) {
609 if (pos->ls_owner != fl_owner) 609 if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
610 continue; 610 continue;
611 switch (pos->ls_owner.lo_type) {
612 case NFS4_POSIX_LOCK_TYPE:
613 if (pos->ls_owner.lo_u.posix_owner != fl_owner)
614 continue;
615 break;
616 case NFS4_FLOCK_LOCK_TYPE:
617 if (pos->ls_owner.lo_u.flock_owner != fl_pid)
618 continue;
619 }
611 atomic_inc(&pos->ls_count); 620 atomic_inc(&pos->ls_count);
612 return pos; 621 return pos;
613 } 622 }
@@ -619,10 +628,10 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
619 * exists, return an uninitialized one. 628 * exists, return an uninitialized one.
620 * 629 *
621 */ 630 */
622static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) 631static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
623{ 632{
624 struct nfs4_lock_state *lsp; 633 struct nfs4_lock_state *lsp;
625 struct nfs_client *clp = state->owner->so_client; 634 struct nfs_client *clp = state->owner->so_server->nfs_client;
626 635
627 lsp = kzalloc(sizeof(*lsp), GFP_NOFS); 636 lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
628 if (lsp == NULL) 637 if (lsp == NULL)
@@ -633,7 +642,18 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
633 lsp->ls_seqid.sequence = &lsp->ls_sequence; 642 lsp->ls_seqid.sequence = &lsp->ls_sequence;
634 atomic_set(&lsp->ls_count, 1); 643 atomic_set(&lsp->ls_count, 1);
635 lsp->ls_state = state; 644 lsp->ls_state = state;
636 lsp->ls_owner = fl_owner; 645 lsp->ls_owner.lo_type = type;
646 switch (lsp->ls_owner.lo_type) {
647 case NFS4_FLOCK_LOCK_TYPE:
648 lsp->ls_owner.lo_u.flock_owner = fl_pid;
649 break;
650 case NFS4_POSIX_LOCK_TYPE:
651 lsp->ls_owner.lo_u.posix_owner = fl_owner;
652 break;
653 default:
654 kfree(lsp);
655 return NULL;
656 }
637 spin_lock(&clp->cl_lock); 657 spin_lock(&clp->cl_lock);
638 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); 658 nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
639 spin_unlock(&clp->cl_lock); 659 spin_unlock(&clp->cl_lock);
@@ -643,7 +663,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
643 663
644static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) 664static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
645{ 665{
646 struct nfs_client *clp = lsp->ls_state->owner->so_client; 666 struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
647 667
648 spin_lock(&clp->cl_lock); 668 spin_lock(&clp->cl_lock);
649 nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id); 669 nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
@@ -657,13 +677,13 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
657 * exists, return an uninitialized one. 677 * exists, return an uninitialized one.
658 * 678 *
659 */ 679 */
660static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) 680static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
661{ 681{
662 struct nfs4_lock_state *lsp, *new = NULL; 682 struct nfs4_lock_state *lsp, *new = NULL;
663 683
664 for(;;) { 684 for(;;) {
665 spin_lock(&state->state_lock); 685 spin_lock(&state->state_lock);
666 lsp = __nfs4_find_lock_state(state, owner); 686 lsp = __nfs4_find_lock_state(state, owner, pid, type);
667 if (lsp != NULL) 687 if (lsp != NULL)
668 break; 688 break;
669 if (new != NULL) { 689 if (new != NULL) {
@@ -674,7 +694,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
674 break; 694 break;
675 } 695 }
676 spin_unlock(&state->state_lock); 696 spin_unlock(&state->state_lock);
677 new = nfs4_alloc_lock_state(state, owner); 697 new = nfs4_alloc_lock_state(state, owner, pid, type);
678 if (new == NULL) 698 if (new == NULL)
679 return NULL; 699 return NULL;
680 } 700 }
@@ -701,6 +721,8 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
701 if (list_empty(&state->lock_states)) 721 if (list_empty(&state->lock_states))
702 clear_bit(LK_STATE_IN_USE, &state->flags); 722 clear_bit(LK_STATE_IN_USE, &state->flags);
703 spin_unlock(&state->state_lock); 723 spin_unlock(&state->state_lock);
724 if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
725 nfs4_release_lockowner(lsp);
704 nfs4_free_lock_state(lsp); 726 nfs4_free_lock_state(lsp);
705} 727}
706 728
@@ -728,7 +750,12 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
728 750
729 if (fl->fl_ops != NULL) 751 if (fl->fl_ops != NULL)
730 return 0; 752 return 0;
731 lsp = nfs4_get_lock_state(state, fl->fl_owner); 753 if (fl->fl_flags & FL_POSIX)
754 lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
755 else if (fl->fl_flags & FL_FLOCK)
756 lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
757 else
758 return -EINVAL;
732 if (lsp == NULL) 759 if (lsp == NULL)
733 return -ENOMEM; 760 return -ENOMEM;
734 fl->fl_u.nfs4_fl.owner = lsp; 761 fl->fl_u.nfs4_fl.owner = lsp;
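Lock state is now keyed by an explicit owner type: POSIX locks keep using the fl_owner_t (normally current->files), flock() locks are keyed by fl_pid, and NFS4_ANY_LOCK_TYPE lets the stateid lookup match either kind. The owner field's definition lives in nfs4_fs.h; a sketch of its assumed shape (the constant names are real, their values here are illustrative):

    /* Assumed layout of lsp->ls_owner, inferred from the lo_type/lo_u uses above. */
    enum {
            NFS4_ANY_LOCK_TYPE = 0,         /* wildcard for __nfs4_find_lock_state() */
            NFS4_FLOCK_LOCK_TYPE,
            NFS4_POSIX_LOCK_TYPE,
    };

    struct nfs4_lock_owner_sketch {         /* illustrative type name */
            unsigned int lo_type;
            union {
                    fl_owner_t posix_owner; /* FL_POSIX: fl->fl_owner */
                    pid_t      flock_owner; /* FL_FLOCK: fl->fl_pid */
            } lo_u;
    };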
@@ -740,7 +767,7 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
740 * Byte-range lock aware utility to initialize the stateid of read/write 767 * Byte-range lock aware utility to initialize the stateid of read/write
741 * requests. 768 * requests.
742 */ 769 */
743void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner) 770void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
744{ 771{
745 struct nfs4_lock_state *lsp; 772 struct nfs4_lock_state *lsp;
746 int seq; 773 int seq;
@@ -753,7 +780,7 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
753 return; 780 return;
754 781
755 spin_lock(&state->state_lock); 782 spin_lock(&state->state_lock);
756 lsp = __nfs4_find_lock_state(state, fl_owner); 783 lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
757 if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 784 if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
758 memcpy(dst, &lsp->ls_stateid, sizeof(*dst)); 785 memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
759 spin_unlock(&state->state_lock); 786 spin_unlock(&state->state_lock);
@@ -1041,11 +1068,11 @@ restart:
1041 case -NFS4ERR_BAD_STATEID: 1068 case -NFS4ERR_BAD_STATEID:
1042 case -NFS4ERR_RECLAIM_BAD: 1069 case -NFS4ERR_RECLAIM_BAD:
1043 case -NFS4ERR_RECLAIM_CONFLICT: 1070 case -NFS4ERR_RECLAIM_CONFLICT:
1044 nfs4_state_mark_reclaim_nograce(sp->so_client, state); 1071 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
1045 break; 1072 break;
1046 case -NFS4ERR_EXPIRED: 1073 case -NFS4ERR_EXPIRED:
1047 case -NFS4ERR_NO_GRACE: 1074 case -NFS4ERR_NO_GRACE:
1048 nfs4_state_mark_reclaim_nograce(sp->so_client, state); 1075 nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
1049 case -NFS4ERR_STALE_CLIENTID: 1076 case -NFS4ERR_STALE_CLIENTID:
1050 case -NFS4ERR_BADSESSION: 1077 case -NFS4ERR_BADSESSION:
1051 case -NFS4ERR_BADSLOT: 1078 case -NFS4ERR_BADSLOT:
@@ -1120,8 +1147,7 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
1120 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 1147 if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
1121 return; 1148 return;
1122 1149
1123 nfs4_reclaim_complete(clp, 1150 nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
1124 nfs4_reboot_recovery_ops[clp->cl_minorversion]);
1125 1151
1126 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { 1152 for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
1127 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); 1153 sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
@@ -1211,8 +1237,8 @@ restart:
1211static int nfs4_check_lease(struct nfs_client *clp) 1237static int nfs4_check_lease(struct nfs_client *clp)
1212{ 1238{
1213 struct rpc_cred *cred; 1239 struct rpc_cred *cred;
1214 struct nfs4_state_maintenance_ops *ops = 1240 const struct nfs4_state_maintenance_ops *ops =
1215 nfs4_state_renewal_ops[clp->cl_minorversion]; 1241 clp->cl_mvops->state_renewal_ops;
1216 int status = -NFS4ERR_EXPIRED; 1242 int status = -NFS4ERR_EXPIRED;
1217 1243
1218 /* Is the client already known to have an expired lease? */ 1244 /* Is the client already known to have an expired lease? */
@@ -1235,8 +1261,8 @@ out:
1235static int nfs4_reclaim_lease(struct nfs_client *clp) 1261static int nfs4_reclaim_lease(struct nfs_client *clp)
1236{ 1262{
1237 struct rpc_cred *cred; 1263 struct rpc_cred *cred;
1238 struct nfs4_state_recovery_ops *ops = 1264 const struct nfs4_state_recovery_ops *ops =
1239 nfs4_reboot_recovery_ops[clp->cl_minorversion]; 1265 clp->cl_mvops->reboot_recovery_ops;
1240 int status = -ENOENT; 1266 int status = -ENOENT;
1241 1267
1242 cred = ops->get_clid_cred(clp); 1268 cred = ops->get_clid_cred(clp);
@@ -1444,7 +1470,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
1444 /* First recover reboot state... */ 1470 /* First recover reboot state... */
1445 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { 1471 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
1446 status = nfs4_do_reclaim(clp, 1472 status = nfs4_do_reclaim(clp,
1447 nfs4_reboot_recovery_ops[clp->cl_minorversion]); 1473 clp->cl_mvops->reboot_recovery_ops);
1448 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 1474 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1449 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) 1475 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
1450 continue; 1476 continue;
@@ -1458,7 +1484,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
1458 /* Now recover expired state... */ 1484 /* Now recover expired state... */
1459 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { 1485 if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
1460 status = nfs4_do_reclaim(clp, 1486 status = nfs4_do_reclaim(clp,
1461 nfs4_nograce_recovery_ops[clp->cl_minorversion]); 1487 clp->cl_mvops->nograce_recovery_ops);
1462 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) || 1488 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
1463 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) || 1489 test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
1464 test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) 1490 test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 65c8dae4b267..08ef91291132 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -202,14 +202,17 @@ static int nfs4_stat_to_errno(int);
202#define encode_link_maxsz (op_encode_hdr_maxsz + \ 202#define encode_link_maxsz (op_encode_hdr_maxsz + \
203 nfs4_name_maxsz) 203 nfs4_name_maxsz)
204#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) 204#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
205#define encode_lockowner_maxsz (7)
205#define encode_lock_maxsz (op_encode_hdr_maxsz + \ 206#define encode_lock_maxsz (op_encode_hdr_maxsz + \
206 7 + \ 207 7 + \
207 1 + encode_stateid_maxsz + 8) 208 1 + encode_stateid_maxsz + 1 + \
209 encode_lockowner_maxsz)
208#define decode_lock_denied_maxsz \ 210#define decode_lock_denied_maxsz \
209 (8 + decode_lockowner_maxsz) 211 (8 + decode_lockowner_maxsz)
210#define decode_lock_maxsz (op_decode_hdr_maxsz + \ 212#define decode_lock_maxsz (op_decode_hdr_maxsz + \
211 decode_lock_denied_maxsz) 213 decode_lock_denied_maxsz)
212#define encode_lockt_maxsz (op_encode_hdr_maxsz + 12) 214#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
215 encode_lockowner_maxsz)
213#define decode_lockt_maxsz (op_decode_hdr_maxsz + \ 216#define decode_lockt_maxsz (op_decode_hdr_maxsz + \
214 decode_lock_denied_maxsz) 217 decode_lock_denied_maxsz)
215#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \ 218#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \
@@ -217,6 +220,11 @@ static int nfs4_stat_to_errno(int);
217 4) 220 4)
218#define decode_locku_maxsz (op_decode_hdr_maxsz + \ 221#define decode_locku_maxsz (op_decode_hdr_maxsz + \
219 decode_stateid_maxsz) 222 decode_stateid_maxsz)
223#define encode_release_lockowner_maxsz \
224 (op_encode_hdr_maxsz + \
225 encode_lockowner_maxsz)
226#define decode_release_lockowner_maxsz \
227 (op_decode_hdr_maxsz)
220#define encode_access_maxsz (op_encode_hdr_maxsz + 1) 228#define encode_access_maxsz (op_encode_hdr_maxsz + 1)
221#define decode_access_maxsz (op_decode_hdr_maxsz + 2) 229#define decode_access_maxsz (op_decode_hdr_maxsz + 2)
222#define encode_symlink_maxsz (op_encode_hdr_maxsz + \ 230#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
@@ -471,6 +479,12 @@ static int nfs4_stat_to_errno(int);
471 decode_sequence_maxsz + \ 479 decode_sequence_maxsz + \
472 decode_putfh_maxsz + \ 480 decode_putfh_maxsz + \
473 decode_locku_maxsz) 481 decode_locku_maxsz)
482#define NFS4_enc_release_lockowner_sz \
483 (compound_encode_hdr_maxsz + \
484 encode_lockowner_maxsz)
485#define NFS4_dec_release_lockowner_sz \
486 (compound_decode_hdr_maxsz + \
487 decode_lockowner_maxsz)
474#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ 488#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
475 encode_sequence_maxsz + \ 489 encode_sequence_maxsz + \
476 encode_putfh_maxsz + \ 490 encode_putfh_maxsz + \
@@ -744,7 +758,7 @@ static void encode_compound_hdr(struct xdr_stream *xdr,
744 struct compound_hdr *hdr) 758 struct compound_hdr *hdr)
745{ 759{
746 __be32 *p; 760 __be32 *p;
747 struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; 761 struct rpc_auth *auth = req->rq_cred->cr_auth;
748 762
749 /* initialize running count of expected bytes in reply. 763 /* initialize running count of expected bytes in reply.
750 * NOTE: the replied tag SHOULD be the same is the one sent, 764 * NOTE: the replied tag SHOULD be the same is the one sent,
@@ -1042,6 +1056,17 @@ static inline uint64_t nfs4_lock_length(struct file_lock *fl)
1042 return fl->fl_end - fl->fl_start + 1; 1056 return fl->fl_end - fl->fl_start + 1;
1043} 1057}
1044 1058
1059static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
1060{
1061 __be32 *p;
1062
1063 p = reserve_space(xdr, 28);
1064 p = xdr_encode_hyper(p, lowner->clientid);
1065 *p++ = cpu_to_be32(16);
1066 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1067 xdr_encode_hyper(p, lowner->id);
1068}
1069
1045/* 1070/*
1046 * opcode,type,reclaim,offset,length,new_lock_owner = 32 1071 * opcode,type,reclaim,offset,length,new_lock_owner = 32
1047 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40 1072 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
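encode_lockowner() factors out the lock_owner4 encoding now shared by LOCK, LOCKT and RELEASE_LOCKOWNER. The 28-byte reservation is the sum of the fixed-size pieces, which is also where the new encode_lockowner_maxsz of 7 XDR words comes from:

    /* Wire layout of lock_owner4 as encoded above (sizes in bytes):
     *
     *   clientid             8    xdr_encode_hyper(lowner->clientid)
     *   opaque owner length  4    cpu_to_be32(16)
     *   "lock id:" prefix    8    fixed 8-byte tag inside the opaque owner
     *   owner id             8    xdr_encode_hyper(lowner->id)
     *   --------------------------
     *   total               28    reserve_space(xdr, 28) == 7 four-byte XDR words
     */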
@@ -1058,14 +1083,11 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
1058 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); 1083 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
1059 *p = cpu_to_be32(args->new_lock_owner); 1084 *p = cpu_to_be32(args->new_lock_owner);
1060 if (args->new_lock_owner){ 1085 if (args->new_lock_owner){
1061 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+32); 1086 p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4);
1062 *p++ = cpu_to_be32(args->open_seqid->sequence->counter); 1087 *p++ = cpu_to_be32(args->open_seqid->sequence->counter);
1063 p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE); 1088 p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE);
1064 *p++ = cpu_to_be32(args->lock_seqid->sequence->counter); 1089 *p++ = cpu_to_be32(args->lock_seqid->sequence->counter);
1065 p = xdr_encode_hyper(p, args->lock_owner.clientid); 1090 encode_lockowner(xdr, &args->lock_owner);
1066 *p++ = cpu_to_be32(16);
1067 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1068 xdr_encode_hyper(p, args->lock_owner.id);
1069 } 1091 }
1070 else { 1092 else {
1071 p = reserve_space(xdr, NFS4_STATEID_SIZE+4); 1093 p = reserve_space(xdr, NFS4_STATEID_SIZE+4);
@@ -1080,15 +1102,12 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
1080{ 1102{
1081 __be32 *p; 1103 __be32 *p;
1082 1104
1083 p = reserve_space(xdr, 52); 1105 p = reserve_space(xdr, 24);
1084 *p++ = cpu_to_be32(OP_LOCKT); 1106 *p++ = cpu_to_be32(OP_LOCKT);
1085 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); 1107 *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
1086 p = xdr_encode_hyper(p, args->fl->fl_start); 1108 p = xdr_encode_hyper(p, args->fl->fl_start);
1087 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); 1109 p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
1088 p = xdr_encode_hyper(p, args->lock_owner.clientid); 1110 encode_lockowner(xdr, &args->lock_owner);
1089 *p++ = cpu_to_be32(16);
1090 p = xdr_encode_opaque_fixed(p, "lock id:", 8);
1091 xdr_encode_hyper(p, args->lock_owner.id);
1092 hdr->nops++; 1111 hdr->nops++;
1093 hdr->replen += decode_lockt_maxsz; 1112 hdr->replen += decode_lockt_maxsz;
1094} 1113}
@@ -1108,6 +1127,17 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
1108 hdr->replen += decode_locku_maxsz; 1127 hdr->replen += decode_locku_maxsz;
1109} 1128}
1110 1129
1130static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
1131{
1132 __be32 *p;
1133
1134 p = reserve_space(xdr, 4);
1135 *p = cpu_to_be32(OP_RELEASE_LOCKOWNER);
1136 encode_lockowner(xdr, lowner);
1137 hdr->nops++;
1138 hdr->replen += decode_release_lockowner_maxsz;
1139}
1140
1111static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) 1141static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
1112{ 1142{
1113 int len = name->len; 1143 int len = name->len;
@@ -1172,7 +1202,7 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op
1172 break; 1202 break;
1173 default: 1203 default:
1174 clp = arg->server->nfs_client; 1204 clp = arg->server->nfs_client;
1175 if (clp->cl_minorversion > 0) { 1205 if (clp->cl_mvops->minor_version > 0) {
1176 if (nfs4_has_persistent_session(clp)) { 1206 if (nfs4_has_persistent_session(clp)) {
1177 *p = cpu_to_be32(NFS4_CREATE_GUARDED); 1207 *p = cpu_to_be32(NFS4_CREATE_GUARDED);
1178 encode_attrs(xdr, arg->u.attrs, arg->server); 1208 encode_attrs(xdr, arg->u.attrs, arg->server);
@@ -1324,14 +1354,14 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
1324 hdr->replen += decode_putrootfh_maxsz; 1354 hdr->replen += decode_putrootfh_maxsz;
1325} 1355}
1326 1356
1327static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) 1357static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx)
1328{ 1358{
1329 nfs4_stateid stateid; 1359 nfs4_stateid stateid;
1330 __be32 *p; 1360 __be32 *p;
1331 1361
1332 p = reserve_space(xdr, NFS4_STATEID_SIZE); 1362 p = reserve_space(xdr, NFS4_STATEID_SIZE);
1333 if (ctx->state != NULL) { 1363 if (ctx->state != NULL) {
1334 nfs4_copy_stateid(&stateid, ctx->state, ctx->lockowner); 1364 nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
1335 xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE); 1365 xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
1336 } else 1366 } else
1337 xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); 1367 xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
@@ -1344,7 +1374,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
1344 p = reserve_space(xdr, 4); 1374 p = reserve_space(xdr, 4);
1345 *p = cpu_to_be32(OP_READ); 1375 *p = cpu_to_be32(OP_READ);
1346 1376
1347 encode_stateid(xdr, args->context); 1377 encode_stateid(xdr, args->context, args->lock_context);
1348 1378
1349 p = reserve_space(xdr, 12); 1379 p = reserve_space(xdr, 12);
1350 p = xdr_encode_hyper(p, args->offset); 1380 p = xdr_encode_hyper(p, args->offset);
@@ -1523,7 +1553,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
1523 p = reserve_space(xdr, 4); 1553 p = reserve_space(xdr, 4);
1524 *p = cpu_to_be32(OP_WRITE); 1554 *p = cpu_to_be32(OP_WRITE);
1525 1555
1526 encode_stateid(xdr, args->context); 1556 encode_stateid(xdr, args->context, args->lock_context);
1527 1557
1528 p = reserve_space(xdr, 16); 1558 p = reserve_space(xdr, 16);
1529 p = xdr_encode_hyper(p, args->offset); 1559 p = xdr_encode_hyper(p, args->offset);
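READ and WRITE encoding now pass the request's nfs_lock_context alongside the open context, so encode_stateid() can hand nfs4_copy_stateid() both the POSIX owner and the flock pid. Only two fields of the lock context are consulted here; an assumed sketch (the full structure, with its refcount and list linkage, is defined in the generic NFS headers):

    /* Fields of struct nfs_lock_context used by encode_stateid() above. */
    struct nfs_lock_context_sketch {        /* illustrative type name */
            fl_owner_t lockowner;           /* POSIX owner, normally current->files */
            pid_t      pid;                 /* flock owner, normally current->tgid */
            /* ... refcount and back-pointer to the open context ... */
    };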
@@ -1704,7 +1734,7 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
1704{ 1734{
1705#if defined(CONFIG_NFS_V4_1) 1735#if defined(CONFIG_NFS_V4_1)
1706 if (args->sa_session) 1736 if (args->sa_session)
1707 return args->sa_session->clp->cl_minorversion; 1737 return args->sa_session->clp->cl_mvops->minor_version;
1708#endif /* CONFIG_NFS_V4_1 */ 1738#endif /* CONFIG_NFS_V4_1 */
1709 return 0; 1739 return 0;
1710} 1740}
@@ -2048,6 +2078,20 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_
2048 return 0; 2078 return 0;
2049} 2079}
2050 2080
2081static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
2082{
2083 struct xdr_stream xdr;
2084 struct compound_hdr hdr = {
2085 .minorversion = 0,
2086 };
2087
2088 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
2089 encode_compound_hdr(&xdr, req, &hdr);
2090 encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
2091 encode_nops(&hdr);
2092 return 0;
2093}
2094
2051/* 2095/*
2052 * Encode a READLINK request 2096 * Encode a READLINK request
2053 */ 2097 */
@@ -2395,7 +2439,7 @@ static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
2395{ 2439{
2396 struct xdr_stream xdr; 2440 struct xdr_stream xdr;
2397 struct compound_hdr hdr = { 2441 struct compound_hdr hdr = {
2398 .minorversion = args->client->cl_minorversion, 2442 .minorversion = args->client->cl_mvops->minor_version,
2399 }; 2443 };
2400 2444
2401 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2445 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -2413,7 +2457,7 @@ static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
2413{ 2457{
2414 struct xdr_stream xdr; 2458 struct xdr_stream xdr;
2415 struct compound_hdr hdr = { 2459 struct compound_hdr hdr = {
2416 .minorversion = args->client->cl_minorversion, 2460 .minorversion = args->client->cl_mvops->minor_version,
2417 }; 2461 };
2418 2462
2419 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2463 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -2431,7 +2475,7 @@ static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
2431{ 2475{
2432 struct xdr_stream xdr; 2476 struct xdr_stream xdr;
2433 struct compound_hdr hdr = { 2477 struct compound_hdr hdr = {
2434 .minorversion = session->clp->cl_minorversion, 2478 .minorversion = session->clp->cl_mvops->minor_version,
2435 }; 2479 };
2436 2480
2437 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 2481 xdr_init_encode(&xdr, &req->rq_snd_buf, p);
@@ -3973,6 +4017,11 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
3973 return status; 4017 return status;
3974} 4018}
3975 4019
4020static int decode_release_lockowner(struct xdr_stream *xdr)
4021{
4022 return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
4023}
4024
3976static int decode_lookup(struct xdr_stream *xdr) 4025static int decode_lookup(struct xdr_stream *xdr)
3977{ 4026{
3978 return decode_op_hdr(xdr, OP_LOOKUP); 4027 return decode_op_hdr(xdr, OP_LOOKUP);
@@ -5259,6 +5308,19 @@ out:
5259 return status; 5308 return status;
5260} 5309}
5261 5310
5311static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
5312{
5313 struct xdr_stream xdr;
5314 struct compound_hdr hdr;
5315 int status;
5316
5317 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
5318 status = decode_compound_hdr(&xdr, &hdr);
5319 if (!status)
5320 status = decode_release_lockowner(&xdr);
5321 return status;
5322}
5323
5262/* 5324/*
5263 * Decode READLINK response 5325 * Decode READLINK response
5264 */ 5326 */
@@ -5866,6 +5928,7 @@ struct rpc_procinfo nfs4_procedures[] = {
5866 PROC(GETACL, enc_getacl, dec_getacl), 5928 PROC(GETACL, enc_getacl, dec_getacl),
5867 PROC(SETACL, enc_setacl, dec_setacl), 5929 PROC(SETACL, enc_setacl, dec_setacl),
5868 PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations), 5930 PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
5931 PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
5869#if defined(CONFIG_NFS_V4_1) 5932#if defined(CONFIG_NFS_V4_1)
5870 PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), 5933 PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
5871 PROC(CREATE_SESSION, enc_create_session, dec_create_session), 5934 PROC(CREATE_SESSION, enc_create_session, dec_create_session),
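The new table entry wires the RELEASE_LOCKOWNER encode and decode routines into nfs4_procedures[]. The sketch below shows only the dispatch-table idea in user space; the real PROC() macro expands to a struct rpc_procinfo entry with more fields than are modelled here.

#include <stdio.h>

typedef int (*encode_fn)(void *req, void *args);
typedef int (*decode_fn)(void *req, void *res);

struct procinfo { const char *name; encode_fn enc; decode_fn dec; };

static int enc_release_lockowner(void *req, void *args) { (void)req; (void)args; return 0; }
static int dec_release_lockowner(void *req, void *res)  { (void)req; (void)res;  return 0; }

/* one row per RPC procedure, looked up by the transport when a call is made */
static const struct procinfo procedures[] = {
	{ "RELEASE_LOCKOWNER", enc_release_lockowner, dec_release_lockowner },
};

int main(void)
{
	const struct procinfo *p = &procedures[0];

	printf("%s: encode=%d decode=%d\n", p->name, p->enc(NULL, NULL), p->dec(NULL, NULL));
	return 0;
}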
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 6bd19d843af7..df101d9f546a 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -105,7 +105,7 @@ static char nfs_root_name[256] __initdata = "";
105static __be32 servaddr __initdata = 0; 105static __be32 servaddr __initdata = 0;
106 106
107/* Name of directory to mount */ 107/* Name of directory to mount */
108static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, }; 108static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = { 0, };
109 109
110/* NFS-related data */ 110/* NFS-related data */
111static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */ 111static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */
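The nfsroot fix grows the path buffer by one byte so that a mount path of exactly NFS_MAXPATHLEN characters still leaves room for the terminating NUL. A stand-alone illustration of the sizing rule, with MAXLEN standing in for NFS_MAXPATHLEN:

#include <stdio.h>

#define MAXLEN 8                      /* stand-in for NFS_MAXPATHLEN */

int main(void)
{
	char too_small[MAXLEN];       /* holds at most MAXLEN - 1 characters plus NUL */
	char right_size[MAXLEN + 1];  /* holds a full MAXLEN-character path plus NUL */
	const char *path = "/exports";          /* exactly MAXLEN characters long */

	/* snprintf() always NUL-terminates, so nothing overflows here, but the
	 * smaller buffer silently drops the last character of the path. */
	snprintf(too_small, sizeof(too_small), "%s", path);
	snprintf(right_size, sizeof(right_size), "%s", path);

	printf("char buf[MAXLEN]     -> \"%s\"\n", too_small);
	printf("char buf[MAXLEN + 1] -> \"%s\"\n", right_size);
	return 0;
}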
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index a3654e57b589..919490232e17 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -79,6 +79,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
79 req->wb_pgbase = offset; 79 req->wb_pgbase = offset;
80 req->wb_bytes = count; 80 req->wb_bytes = count;
81 req->wb_context = get_nfs_open_context(ctx); 81 req->wb_context = get_nfs_open_context(ctx);
82 req->wb_lock_context = nfs_get_lock_context(ctx);
82 kref_init(&req->wb_kref); 83 kref_init(&req->wb_kref);
83 return req; 84 return req;
84} 85}
@@ -141,11 +142,16 @@ void nfs_clear_request(struct nfs_page *req)
141{ 142{
142 struct page *page = req->wb_page; 143 struct page *page = req->wb_page;
143 struct nfs_open_context *ctx = req->wb_context; 144 struct nfs_open_context *ctx = req->wb_context;
145 struct nfs_lock_context *l_ctx = req->wb_lock_context;
144 146
145 if (page != NULL) { 147 if (page != NULL) {
146 page_cache_release(page); 148 page_cache_release(page);
147 req->wb_page = NULL; 149 req->wb_page = NULL;
148 } 150 }
151 if (l_ctx != NULL) {
152 nfs_put_lock_context(l_ctx);
153 req->wb_lock_context = NULL;
154 }
149 if (ctx != NULL) { 155 if (ctx != NULL) {
150 put_nfs_open_context(ctx); 156 put_nfs_open_context(ctx);
151 req->wb_context = NULL; 157 req->wb_context = NULL;
@@ -235,7 +241,7 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev,
235{ 241{
236 if (req->wb_context->cred != prev->wb_context->cred) 242 if (req->wb_context->cred != prev->wb_context->cred)
237 return 0; 243 return 0;
238 if (req->wb_context->lockowner != prev->wb_context->lockowner) 244 if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
239 return 0; 245 return 0;
240 if (req->wb_context->state != prev->wb_context->state) 246 if (req->wb_context->state != prev->wb_context->state)
241 return 0; 247 return 0;
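Each nfs_page now pins a lock context for its whole lifetime: nfs_create_request() takes the reference and nfs_clear_request() drops it, mirroring the existing open-context handling, and request coalescing compares lock owners through that context. A reduced user-space model of the get/put pairing, with invented types and no real locking:

#include <stdio.h>
#include <stdlib.h>

struct lock_context { int refcount; };

static struct lock_context *get_lock_context(struct lock_context *l)
{
	l->refcount++;
	return l;
}

static void put_lock_context(struct lock_context *l)
{
	if (--l->refcount == 0)
		printf("last reference dropped, context can be freed\n");
}

struct request { struct lock_context *wb_lock_context; };

static struct request *create_request(struct lock_context *l)
{
	struct request *req = calloc(1, sizeof(*req));

	req->wb_lock_context = get_lock_context(l);   /* paired with clear_request() */
	return req;
}

static void clear_request(struct request *req)
{
	if (req->wb_lock_context != NULL) {
		put_lock_context(req->wb_lock_context);   /* drop the request's reference */
		req->wb_lock_context = NULL;
	}
	free(req);
}

int main(void)
{
	struct lock_context ctx = { .refcount = 1 };      /* the opener's own reference */
	struct request *req = create_request(&ctx);

	clear_request(req);                               /* request goes away first */
	put_lock_context(&ctx);                           /* opener drops the last reference */
	return 0;
}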
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 6e2b06e6ca79..87adc2744246 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -190,6 +190,7 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
190 data->args.pages = data->pagevec; 190 data->args.pages = data->pagevec;
191 data->args.count = count; 191 data->args.count = count;
192 data->args.context = get_nfs_open_context(req->wb_context); 192 data->args.context = get_nfs_open_context(req->wb_context);
193 data->args.lock_context = req->wb_lock_context;
193 194
194 data->res.fattr = &data->fattr; 195 data->res.fattr = &data->fattr;
195 data->res.count = count; 196 data->res.count = count;
@@ -410,7 +411,7 @@ void nfs_read_prepare(struct rpc_task *task, void *calldata)
410{ 411{
411 struct nfs_read_data *data = calldata; 412 struct nfs_read_data *data = calldata;
412 413
413 if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 414 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
414 &data->args.seq_args, &data->res.seq_res, 415 &data->args.seq_args, &data->res.seq_res,
415 0, task)) 416 0, task))
416 return; 417 return;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f9df16de4a56..f1ae39f6cb02 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -546,6 +546,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
546{ 546{
547 struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address; 547 struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
548 548
549 if (nfss->flags & NFS_MOUNT_LEGACY_INTERFACE)
550 return;
551
549 switch (sap->sa_family) { 552 switch (sap->sa_family) {
550 case AF_INET: { 553 case AF_INET: {
551 struct sockaddr_in *sin = (struct sockaddr_in *)sap; 554 struct sockaddr_in *sin = (struct sockaddr_in *)sap;
@@ -1780,6 +1783,7 @@ static int nfs_validate_mount_data(void *options,
1780 * can deal with. 1783 * can deal with.
1781 */ 1784 */
1782 args->flags = data->flags & NFS_MOUNT_FLAGMASK; 1785 args->flags = data->flags & NFS_MOUNT_FLAGMASK;
1786 args->flags |= NFS_MOUNT_LEGACY_INTERFACE;
1783 args->rsize = data->rsize; 1787 args->rsize = data->rsize;
1784 args->wsize = data->wsize; 1788 args->wsize = data->wsize;
1785 args->timeo = data->timeo; 1789 args->timeo = data->timeo;
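Mounts that arrive through the legacy binary mount(2) interface are tagged with NFS_MOUNT_LEGACY_INTERFACE, and nfs_show_mountd_options() returns early for them, presumably because the binary mount data carries no reliable mountd address worth reporting. A user-space sketch of the gate; the flag's real bit value is not shown in this diff and is invented below.

#include <stdio.h>

#define MOUNT_LEGACY_INTERFACE 0x1    /* illustrative bit, not the kernel's value */

static void show_mountd_options(unsigned int flags)
{
	if (flags & MOUNT_LEGACY_INTERFACE)
		return;          /* legacy mount data: nothing trustworthy to print */
	printf(",mountaddr=192.0.2.1,mountport=20048\n");
}

int main(void)
{
	show_mountd_options(0);                           /* text-based mount: printed */
	show_mountd_options(MOUNT_LEGACY_INTERFACE);      /* legacy mount: suppressed */
	return 0;
}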
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index a2242af6a17d..2f84adaad427 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -110,7 +110,7 @@ void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
110 struct nfs_unlinkdata *data = calldata; 110 struct nfs_unlinkdata *data = calldata;
111 struct nfs_server *server = NFS_SERVER(data->dir); 111 struct nfs_server *server = NFS_SERVER(data->dir);
112 112
113 if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 113 if (nfs4_setup_sequence(server, &data->args.seq_args,
114 &data->res.seq_res, 1, task)) 114 &data->res.seq_res, 1, task))
115 return; 115 return;
116 rpc_call_start(task); 116 rpc_call_start(task);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 91679e2631ee..874972d9427c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
222 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 222 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
223} 223}
224 224
225static struct nfs_page *nfs_find_and_lock_request(struct page *page) 225static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
226{ 226{
227 struct inode *inode = page->mapping->host; 227 struct inode *inode = page->mapping->host;
228 struct nfs_page *req; 228 struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
241 * request as dirty (in which case we don't care). 241 * request as dirty (in which case we don't care).
242 */ 242 */
243 spin_unlock(&inode->i_lock); 243 spin_unlock(&inode->i_lock);
244 ret = nfs_wait_on_request(req); 244 if (!nonblock)
245 ret = nfs_wait_on_request(req);
246 else
247 ret = -EAGAIN;
245 nfs_release_request(req); 248 nfs_release_request(req);
246 if (ret != 0) 249 if (ret != 0)
247 return ERR_PTR(ret); 250 return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
256 * May return an error if the user signalled nfs_wait_on_request(). 259 * May return an error if the user signalled nfs_wait_on_request().
257 */ 260 */
258static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 261static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
259 struct page *page) 262 struct page *page, bool nonblock)
260{ 263{
261 struct nfs_page *req; 264 struct nfs_page *req;
262 int ret = 0; 265 int ret = 0;
263 266
264 req = nfs_find_and_lock_request(page); 267 req = nfs_find_and_lock_request(page, nonblock);
265 if (!req) 268 if (!req)
266 goto out; 269 goto out;
267 ret = PTR_ERR(req); 270 ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ out:
283static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) 286static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
284{ 287{
285 struct inode *inode = page->mapping->host; 288 struct inode *inode = page->mapping->host;
289 int ret;
286 290
287 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 291 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
288 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); 292 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
289 293
290 nfs_pageio_cond_complete(pgio, page->index); 294 nfs_pageio_cond_complete(pgio, page->index);
291 return nfs_page_async_flush(pgio, page); 295 ret = nfs_page_async_flush(pgio, page,
296 wbc->sync_mode == WB_SYNC_NONE ||
297 wbc->nonblocking != 0);
298 if (ret == -EAGAIN) {
299 redirty_page_for_writepage(wbc, page);
300 ret = 0;
301 }
302 return ret;
292} 303}
293 304
294/* 305/*
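With the nonblock flag, a background writeback pass (WB_SYNC_NONE or a nonblocking caller) no longer sleeps on a request that is still in flight: nfs_find_and_lock_request() returns -EAGAIN, and nfs_do_writepage() redirties the page so a later pass retries it. A self-contained sketch of that control flow, with toy types standing in for the kernel structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct page { bool busy; bool dirty; };

static int flush_page(struct page *pg, bool nonblock)
{
	if (pg->busy) {
		if (nonblock)
			return -EAGAIN;  /* don't sleep; the caller will retry later */
		pg->busy = false;        /* stand-in for nfs_wait_on_request() */
	}
	pg->dirty = false;               /* page handed off to the write path */
	return 0;
}

static int do_writepage(struct page *pg, bool sync_none)
{
	int ret = flush_page(pg, /* nonblock = */ sync_none);

	if (ret == -EAGAIN) {
		pg->dirty = true;        /* redirty_page_for_writepage() analogue */
		ret = 0;                 /* not an error: the write is just deferred */
	}
	return ret;
}

int main(void)
{
	struct page pg = { .busy = true, .dirty = true };

	printf("background pass: ret=%d dirty=%d\n", do_writepage(&pg, true), (int)pg.dirty);
	printf("sync pass:       ret=%d dirty=%d\n", do_writepage(&pg, false), (int)pg.dirty);
	return 0;
}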
@@ -689,7 +700,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
689 req = nfs_page_find_request(page); 700 req = nfs_page_find_request(page);
690 if (req == NULL) 701 if (req == NULL)
691 return 0; 702 return 0;
692 do_flush = req->wb_page != page || req->wb_context != ctx; 703 do_flush = req->wb_page != page || req->wb_context != ctx ||
704 req->wb_lock_context->lockowner != current->files ||
705 req->wb_lock_context->pid != current->tgid;
693 nfs_release_request(req); 706 nfs_release_request(req);
694 if (!do_flush) 707 if (!do_flush)
695 return 0; 708 return 0;
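A page may only reuse a cached write request when the new write comes from the same open context, the same lock owner, and the same thread group; otherwise the old request is flushed first so writes issued under different lock state never coalesce. A small sketch of the widened test; the field names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct writer  { void *open_ctx; void *lockowner; int tgid; };
struct request { void *open_ctx; void *lockowner; int tgid; };

static bool must_flush(const struct request *req, const struct writer *w)
{
	return req->open_ctx  != w->open_ctx  ||
	       req->lockowner != w->lockowner ||
	       req->tgid      != w->tgid;
}

int main(void)
{
	int files_a, files_b;
	struct request req  = { .open_ctx = &files_a, .lockowner = &files_a, .tgid = 100 };
	struct writer same  = { .open_ctx = &files_a, .lockowner = &files_a, .tgid = 100 };
	struct writer other = { .open_ctx = &files_a, .lockowner = &files_b, .tgid = 200 };

	printf("same lock context:      flush=%d\n", (int)must_flush(&req, &same));
	printf("different lock context: flush=%d\n", (int)must_flush(&req, &other));
	return 0;
}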
@@ -813,6 +826,7 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
813 data->args.pages = data->pagevec; 826 data->args.pages = data->pagevec;
814 data->args.count = count; 827 data->args.count = count;
815 data->args.context = get_nfs_open_context(req->wb_context); 828 data->args.context = get_nfs_open_context(req->wb_context);
829 data->args.lock_context = req->wb_lock_context;
816 data->args.stable = NFS_UNSTABLE; 830 data->args.stable = NFS_UNSTABLE;
817 if (how & FLUSH_STABLE) { 831 if (how & FLUSH_STABLE) {
818 data->args.stable = NFS_DATA_SYNC; 832 data->args.stable = NFS_DATA_SYNC;
@@ -1036,9 +1050,9 @@ out:
1036void nfs_write_prepare(struct rpc_task *task, void *calldata) 1050void nfs_write_prepare(struct rpc_task *task, void *calldata)
1037{ 1051{
1038 struct nfs_write_data *data = calldata; 1052 struct nfs_write_data *data = calldata;
1039 struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
1040 1053
1041 if (nfs4_setup_sequence(clp, &data->args.seq_args, 1054 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
1055 &data->args.seq_args,
1042 &data->res.seq_res, 1, task)) 1056 &data->res.seq_res, 1, task))
1043 return; 1057 return;
1044 rpc_call_start(task); 1058 rpc_call_start(task);
@@ -1379,7 +1393,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
1379 .rpc_release = nfs_commit_release, 1393 .rpc_release = nfs_commit_release,
1380}; 1394};
1381 1395
1382static int nfs_commit_inode(struct inode *inode, int how) 1396int nfs_commit_inode(struct inode *inode, int how)
1383{ 1397{
1384 LIST_HEAD(head); 1398 LIST_HEAD(head);
1385 int may_wait = how & FLUSH_SYNC; 1399 int may_wait = how & FLUSH_SYNC;
@@ -1443,11 +1457,6 @@ out_mark_dirty:
1443 return ret; 1457 return ret;
1444} 1458}
1445#else 1459#else
1446static int nfs_commit_inode(struct inode *inode, int how)
1447{
1448 return 0;
1449}
1450
1451static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc) 1460static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
1452{ 1461{
1453 return 0; 1462 return 0;
@@ -1546,7 +1555,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1546 1555
1547 nfs_fscache_release_page(page, GFP_KERNEL); 1556 nfs_fscache_release_page(page, GFP_KERNEL);
1548 1557
1549 req = nfs_find_and_lock_request(page); 1558 req = nfs_find_and_lock_request(page, false);
1550 ret = PTR_ERR(req); 1559 ret = PTR_ERR(req);
1551 if (IS_ERR(req)) 1560 if (IS_ERR(req))
1552 goto out; 1561 goto out;