 fs/nfsd/export.c                        |  44
 fs/nfsd/nfs4callback.c                  | 129
 fs/nfsd/nfs4proc.c                      |  45
 fs/nfsd/nfs4state.c                     | 163
 fs/nfsd/nfs4xdr.c                       |  14
 fs/nfsd/nfsctl.c                        |  64
 fs/nfsd/nfsd.h                          |   6
 fs/nfsd/state.h                         |  30
 fs/nfsd/vfs.c                           |   8
 fs/nfsd/vfs.h                           |   1
 fs/nfsd/xdr4.h                          |   5
 include/linux/nfsd/nfsfh.h              |   6
 net/sunrpc/cache.c                      |  45
 net/sunrpc/svc_xprt.c                   |   6
 net/sunrpc/svcsock.c                    |  15
 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c |   3
 16 files changed, 330 insertions, 254 deletions
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 872a5ef550c7..c2a4f71d87dd 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -259,10 +259,9 @@ static struct cache_detail svc_expkey_cache = {
 	.alloc = expkey_alloc,
 };
 
-static struct svc_expkey *
-svc_expkey_lookup(struct svc_expkey *item)
+static int
+svc_expkey_hash(struct svc_expkey *item)
 {
-	struct cache_head *ch;
 	int hash = item->ek_fsidtype;
 	char * cp = (char*)item->ek_fsid;
 	int len = key_len(item->ek_fsidtype);
@@ -270,6 +269,14 @@ svc_expkey_lookup(struct svc_expkey *item)
 	hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
 	hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS);
 	hash &= EXPKEY_HASHMASK;
+	return hash;
+}
+
+static struct svc_expkey *
+svc_expkey_lookup(struct svc_expkey *item)
+{
+	struct cache_head *ch;
+	int hash = svc_expkey_hash(item);
 
 	ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h,
 				 hash);
@@ -283,13 +290,7 @@ static struct svc_expkey *
 svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
 {
 	struct cache_head *ch;
-	int hash = new->ek_fsidtype;
-	char * cp = (char*)new->ek_fsid;
-	int len = key_len(new->ek_fsidtype);
-
-	hash ^= hash_mem(cp, len, EXPKEY_HASHBITS);
-	hash ^= hash_ptr(new->ek_client, EXPKEY_HASHBITS);
-	hash &= EXPKEY_HASHMASK;
+	int hash = svc_expkey_hash(new);
 
 	ch = sunrpc_cache_update(&svc_expkey_cache, &new->h,
 				 &old->h, hash);
@@ -738,14 +739,22 @@ struct cache_detail svc_export_cache = {
 	.alloc = svc_export_alloc,
 };
 
-static struct svc_export *
-svc_export_lookup(struct svc_export *exp)
+static int
+svc_export_hash(struct svc_export *exp)
 {
-	struct cache_head *ch;
 	int hash;
+
 	hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS);
 	hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS);
 	hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS);
+	return hash;
+}
+
+static struct svc_export *
+svc_export_lookup(struct svc_export *exp)
+{
+	struct cache_head *ch;
+	int hash = svc_export_hash(exp);
 
 	ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h,
 				 hash);
@@ -759,10 +768,7 @@ static struct svc_export *
 svc_export_update(struct svc_export *new, struct svc_export *old)
 {
 	struct cache_head *ch;
-	int hash;
-	hash = hash_ptr(old->ex_client, EXPORT_HASHBITS);
-	hash ^= hash_ptr(old->ex_path.dentry, EXPORT_HASHBITS);
-	hash ^= hash_ptr(old->ex_path.mnt, EXPORT_HASHBITS);
+	int hash = svc_export_hash(old);
 
 	ch = sunrpc_cache_update(&svc_export_cache, &new->h,
 				 &old->h,
@@ -1071,9 +1077,9 @@ exp_export(struct nfsctl_export *nxp)
 	err = 0;
 finish:
 	kfree(new.ex_pathname);
-	if (exp)
+	if (!IS_ERR_OR_NULL(exp))
 		exp_put(exp);
-	if (fsid_key && !IS_ERR(fsid_key))
+	if (!IS_ERR_OR_NULL(fsid_key))
 		cache_put(&fsid_key->h, &svc_expkey_cache);
 	path_put(&path);
 out_put_clp:
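
The export.c hunks above are a pure refactoring: the hash computation that was previously duplicated in the lookup and update paths now lives in one helper (svc_expkey_hash()/svc_export_hash()) used by both callers, so the two paths can never drift apart. A minimal standalone sketch of the same pattern, with invented names (this is not nfsd code):

#include <stdio.h>

struct key { unsigned type; unsigned id; };

/* one place that defines how a key hashes, used by both paths below */
static unsigned key_hash(const struct key *k)
{
	return (k->type * 31 + k->id) & 0xff;
}

static void lookup(const struct key *k)
{
	printf("lookup in bucket %u\n", key_hash(k));	/* was: inline copy #1 */
}

static void update(const struct key *k)
{
	printf("update in bucket %u\n", key_hash(k));	/* was: inline copy #2 */
}

int main(void)
{
	struct key k = { .type = 1, .id = 42 };
	lookup(&k);
	update(&k);
	return 0;
}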
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 7e32bd394e86..1d5051d46b46 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svc_xprt.h>
 #include <linux/slab.h>
 #include "nfsd.h"
 #include "state.h"
@@ -79,11 +80,6 @@ enum nfs_cb_opnum4 {
 	cb_sequence_dec_sz + \
 	op_dec_sz)
 
-struct nfs4_rpc_args {
-	void *args_op;
-	struct nfsd4_cb_sequence args_seq;
-};
-
 /*
  * Generic encode routines from fs/nfs/nfs4xdr.c
  */
@@ -456,15 +452,14 @@ static struct rpc_program cb_program = {
 
 static int max_cb_time(void)
 {
-	return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+	return max(nfsd4_lease/10, (time_t)1) * HZ;
 }
 
 /* Reference counting, callback cleanup, etc., all look racy as heck.
- * And why is cb_set an atomic? */
+ * And why is cl_cb_set an atomic? */
 
-int setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
 {
-	struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
 	struct rpc_timeout timeparms = {
 		.to_initval = max_cb_time(),
 		.to_retries = 0,
@@ -486,7 +481,7 @@ int setup_callback_client(struct nfs4_client *clp)
 	if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
 		return -EINVAL;
 	if (cb->cb_minorversion) {
-		args.bc_xprt = clp->cl_cb_xprt;
+		args.bc_xprt = cb->cb_xprt;
 		args.protocol = XPRT_TRANSPORT_BC_TCP;
 	}
 	/* Create RPC client */
@@ -496,7 +491,7 @@ int setup_callback_client(struct nfs4_client *clp)
 			PTR_ERR(client));
 		return PTR_ERR(client);
 	}
-	cb->cb_client = client;
+	nfsd4_set_callback_client(clp, client);
 	return 0;
 
 }
@@ -514,8 +509,7 @@ static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 	if (task->tk_status)
 		warn_no_callback_path(clp, task->tk_status);
 	else
-		atomic_set(&clp->cl_cb_conn.cb_set, 1);
-	put_nfs4_client(clp);
+		atomic_set(&clp->cl_cb_set, 1);
 }
 
 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
@@ -537,7 +531,6 @@ int set_callback_cred(void)
 
 void do_probe_callback(struct nfs4_client *clp)
 {
-	struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
 		.rpc_argp = clp,
@@ -545,34 +538,27 @@ void do_probe_callback(struct nfs4_client *clp)
 	};
 	int status;
 
-	status = rpc_call_async(cb->cb_client, &msg,
+	status = rpc_call_async(clp->cl_cb_client, &msg,
 				RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
 				&nfsd4_cb_probe_ops, (void *)clp);
-	if (status) {
+	if (status)
 		warn_no_callback_path(clp, status);
-		put_nfs4_client(clp);
-	}
 }
 
 /*
  * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
  */
-void
-nfsd4_probe_callback(struct nfs4_client *clp)
+void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *cb)
 {
 	int status;
 
-	BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+	BUG_ON(atomic_read(&clp->cl_cb_set));
 
-	status = setup_callback_client(clp);
+	status = setup_callback_client(clp, cb);
 	if (status) {
 		warn_no_callback_path(clp, status);
 		return;
 	}
-
-	/* the task holds a reference to the nfs4_client struct */
-	atomic_inc(&clp->cl_count);
-
 	do_probe_callback(clp);
 }
 
@@ -658,18 +644,32 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
 	}
 }
 
+
 static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_delegation *dp = calldata;
 	struct nfs4_client *clp = dp->dl_client;
+	struct rpc_clnt *current_rpc_client = clp->cl_cb_client;
 
 	nfsd4_cb_done(task, calldata);
 
+	if (current_rpc_client == NULL) {
+		/* We're shutting down; give up. */
+		/* XXX: err, or is it ok just to fall through
+		 * and rpc_restart_call? */
+		return;
+	}
+
 	switch (task->tk_status) {
 	case -EIO:
 		/* Network partition? */
-		atomic_set(&clp->cl_cb_conn.cb_set, 0);
+		atomic_set(&clp->cl_cb_set, 0);
 		warn_no_callback_path(clp, task->tk_status);
+		if (current_rpc_client != task->tk_client) {
+			/* queue a callback on the new connection: */
+			nfsd4_cb_recall(dp);
+			return;
+		}
 	case -EBADHANDLE:
 	case -NFS4ERR_BAD_STATEID:
 		/* Race: client probably got cb_recall
@@ -677,7 +677,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
 		break;
 	default:
 		/* success, or error we can't handle */
-		goto done;
+		return;
 	}
 	if (dp->dl_retries--) {
 		rpc_delay(task, 2*HZ);
@@ -685,20 +685,16 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
 		rpc_restart_call(task);
 		return;
 	} else {
-		atomic_set(&clp->cl_cb_conn.cb_set, 0);
+		atomic_set(&clp->cl_cb_set, 0);
 		warn_no_callback_path(clp, task->tk_status);
 	}
-done:
-	kfree(task->tk_msg.rpc_argp);
 }
 
 static void nfsd4_cb_recall_release(void *calldata)
 {
 	struct nfs4_delegation *dp = calldata;
-	struct nfs4_client *clp = dp->dl_client;
 
 	nfs4_put_delegation(dp);
-	put_nfs4_client(clp);
 }
 
 static const struct rpc_call_ops nfsd4_cb_recall_ops = {
@@ -707,33 +703,74 @@ static const struct rpc_call_ops nfsd4_cb_recall_ops = {
 	.rpc_release = nfsd4_cb_recall_release,
 };
 
+static struct workqueue_struct *callback_wq;
+
+int nfsd4_create_callback_queue(void)
+{
+	callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
+	if (!callback_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+void nfsd4_destroy_callback_queue(void)
+{
+	destroy_workqueue(callback_wq);
+}
+
+void nfsd4_set_callback_client(struct nfs4_client *clp, struct rpc_clnt *new)
+{
+	struct rpc_clnt *old = clp->cl_cb_client;
+
+	clp->cl_cb_client = new;
+	/*
+	 * After this, any work that saw the old value of cl_cb_client will
+	 * be gone:
+	 */
+	flush_workqueue(callback_wq);
+	/* So we can safely shut it down: */
+	if (old)
+		rpc_shutdown_client(old);
+}
+
 /*
  * called with dp->dl_count inc'ed.
  */
-void
-nfsd4_cb_recall(struct nfs4_delegation *dp)
+static void _nfsd4_cb_recall(struct nfs4_delegation *dp)
 {
 	struct nfs4_client *clp = dp->dl_client;
-	struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
-	struct nfs4_rpc_args *args;
+	struct rpc_clnt *clnt = clp->cl_cb_client;
+	struct nfs4_rpc_args *args = &dp->dl_recall.cb_args;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
 		.rpc_cred = callback_cred
 	};
-	int status = -ENOMEM;
+	int status;
+
+	if (clnt == NULL)
+		return; /* Client is shutting down; give up. */
 
-	args = kzalloc(sizeof(*args), GFP_KERNEL);
-	if (!args)
-		goto out;
 	args->args_op = dp;
 	msg.rpc_argp = args;
 	dp->dl_retries = 1;
 	status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
 				&nfsd4_cb_recall_ops, dp);
-out:
-	if (status) {
-		kfree(args);
-		put_nfs4_client(clp);
+	if (status)
 		nfs4_put_delegation(dp);
-	}
 }
+
+void nfsd4_do_callback_rpc(struct work_struct *w)
+{
+	/* XXX: for now, just send off delegation recall. */
+	/* In future, generalize to handle any sort of callback. */
+	struct nfsd4_callback *c = container_of(w, struct nfsd4_callback, cb_work);
+	struct nfs4_delegation *dp = container_of(c, struct nfs4_delegation, dl_recall);
+
+	_nfsd4_cb_recall(dp);
+}
+
+
+void nfsd4_cb_recall(struct nfs4_delegation *dp)
+{
+	queue_work(callback_wq, &dp->dl_recall.cb_work);
+}
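
The callback rework above drops the per-recall kzalloc() and the nfs4_client refcounting in favour of a struct nfsd4_callback embedded in the delegation and dispatched from a single-threaded workqueue; nfsd4_do_callback_rpc() recovers the delegation from the work item with container_of(), and nfsd4_set_callback_client() can flush that queue before shutting down the old rpc_clnt. A rough userspace sketch of the embedded-callback/container_of pattern, with invented names and no real workqueue (not kernel code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct callback {
	void (*fn)(struct callback *cb);	/* stand-in for work_struct */
};

struct delegation {
	int id;
	struct callback recall;			/* like dl_recall above */
};

static void do_recall(struct callback *cb)
{
	/* get from the generic work item back to its owning object */
	struct delegation *dp = container_of(cb, struct delegation, recall);

	printf("recalling delegation %d\n", dp->id);
}

int main(void)
{
	struct delegation dp = { .id = 7, .recall = { .fn = do_recall } };

	dp.recall.fn(&dp.recall);	/* a workqueue thread would do this */
	return 0;
}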
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2ab9e8501bfe..e2dc9608281b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -969,20 +969,36 @@ static struct nfsd4_operation nfsd4_ops[];
 static const char *nfsd4_op_name(unsigned opnum);
 
 /*
- * Enforce NFSv4.1 COMPOUND ordering rules.
+ * Enforce NFSv4.1 COMPOUND ordering rules:
  *
- * TODO:
- * - enforce NFS4ERR_NOT_ONLY_OP,
- * - DESTROY_SESSION MUST be the final operation in the COMPOUND request.
+ * Also note, enforced elsewhere:
+ * - SEQUENCE other than as first op results in
+ *   NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().)
+ * - BIND_CONN_TO_SESSION must be the only op in its compound
+ *   (Will be enforced in nfsd4_bind_conn_to_session().)
+ * - DESTROY_SESSION must be the final operation in a compound, if
+ *   sessionid's in SEQUENCE and DESTROY_SESSION are the same.
+ *   (Enforced in nfsd4_destroy_session().)
  */
-static bool nfs41_op_ordering_ok(struct nfsd4_compoundargs *args)
+static __be32 nfs41_check_op_ordering(struct nfsd4_compoundargs *args)
 {
-	if (args->minorversion && args->opcnt > 0) {
-		struct nfsd4_op *op = &args->ops[0];
-		return (op->status == nfserr_op_illegal) ||
-		       (nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP);
-	}
-	return true;
+	struct nfsd4_op *op = &args->ops[0];
+
+	/* These ordering requirements don't apply to NFSv4.0: */
+	if (args->minorversion == 0)
+		return nfs_ok;
+	/* This is weird, but OK, not our problem: */
+	if (args->opcnt == 0)
+		return nfs_ok;
+	if (op->status == nfserr_op_illegal)
+		return nfs_ok;
+	if (!(nfsd4_ops[op->opnum].op_flags & ALLOWED_AS_FIRST_OP))
+		return nfserr_op_not_in_session;
+	if (op->opnum == OP_SEQUENCE)
+		return nfs_ok;
+	if (args->opcnt != 1)
+		return nfserr_not_only_op;
+	return nfs_ok;
 }
 
 /*
@@ -1012,6 +1028,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 	resp->rqstp = rqstp;
 	resp->cstate.minorversion = args->minorversion;
 	resp->cstate.replay_owner = NULL;
+	resp->cstate.session = NULL;
 	fh_init(&resp->cstate.current_fh, NFS4_FHSIZE);
 	fh_init(&resp->cstate.save_fh, NFS4_FHSIZE);
 	/* Use the deferral mechanism only for NFSv4.0 compounds */
@@ -1024,13 +1041,13 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 	if (args->minorversion > nfsd_supported_minorversion)
 		goto out;
 
-	if (!nfs41_op_ordering_ok(args)) {
+	status = nfs41_check_op_ordering(args);
+	if (status) {
 		op = &args->ops[0];
-		op->status = nfserr_sequence_pos;
+		op->status = status;
 		goto encode_op;
 	}
 
-	status = nfs_ok;
 	while (!status && resp->opcnt < args->opcnt) {
 		op = &args->ops[resp->opcnt++];
 
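
With nfs41_check_op_ordering() returning an nfserr value instead of a bool, nfsd4_proc_compound() can report the specific NFSv4.1 error (nfserr_op_not_in_session or nfserr_not_only_op) rather than always nfserr_sequence_pos. A standalone sketch of the same decision sequence, using invented constants rather than the nfsd ones:

#include <stdio.h>

enum { OK = 0, ERR_OP_NOT_IN_SESSION = 1, ERR_NOT_ONLY_OP = 2 };

static int check_first_op(int minorversion, int opcnt, int first_op_illegal,
			  int first_op_allowed_first, int first_op_is_sequence)
{
	if (minorversion == 0)			/* rules apply to v4.1+ only */
		return OK;
	if (opcnt == 0 || first_op_illegal)	/* handled elsewhere */
		return OK;
	if (!first_op_allowed_first)
		return ERR_OP_NOT_IN_SESSION;
	if (first_op_is_sequence)
		return OK;
	if (opcnt != 1)				/* e.g. BIND_CONN_TO_SESSION plus more ops */
		return ERR_NOT_ONLY_OP;
	return OK;
}

int main(void)
{
	/* PUTFH first in a v4.1 compound: not in session */
	printf("%d\n", check_first_op(1, 3, 0, 0, 0));
	/* SEQUENCE first: fine */
	printf("%d\n", check_first_op(1, 3, 0, 1, 1));
	return 0;
}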
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 6a8fedaa4f55..f05a3276ba6b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -45,8 +45,8 @@
 #define NFSDDBG_FACILITY NFSDDBG_PROC
 
 /* Globals */
-static time_t lease_time = 90;     /* default lease time */
-static time_t user_lease_time = 90;
+time_t nfsd4_lease = 90;     /* default lease time */
+time_t nfsd4_grace = 90;
 static time_t boot_time;
 static u32 current_ownerid = 1;
 static u32 current_fileid = 1;
@@ -199,6 +199,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
 	atomic_set(&dp->dl_count, 1);
 	list_add(&dp->dl_perfile, &fp->fi_delegations);
 	list_add(&dp->dl_perclnt, &clp->cl_delegations);
+	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
 	return dp;
 }
 
@@ -680,27 +681,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
 	return clp;
 }
 
-static void
-shutdown_callback_client(struct nfs4_client *clp)
-{
-	struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
-
-	if (clnt) {
-		/*
-		 * Callback threads take a reference on the client, so there
-		 * should be no outstanding callbacks at this point.
-		 */
-		clp->cl_cb_conn.cb_client = NULL;
-		rpc_shutdown_client(clnt);
-	}
-}
-
 static inline void
 free_client(struct nfs4_client *clp)
 {
-	shutdown_callback_client(clp);
-	if (clp->cl_cb_xprt)
-		svc_xprt_put(clp->cl_cb_xprt);
 	if (clp->cl_cred.cr_group_info)
 		put_group_info(clp->cl_cred.cr_group_info);
 	kfree(clp->cl_principal);
@@ -708,13 +691,6 @@ free_client(struct nfs4_client *clp)
 	kfree(clp);
 }
 
-void
-put_nfs4_client(struct nfs4_client *clp)
-{
-	if (atomic_dec_and_test(&clp->cl_count))
-		free_client(clp);
-}
-
 static void
 expire_client(struct nfs4_client *clp)
 {
@@ -722,9 +698,6 @@ expire_client(struct nfs4_client *clp)
 	struct nfs4_delegation *dp;
 	struct list_head reaplist;
 
-	dprintk("NFSD: expire_client cl_count %d\n",
-			atomic_read(&clp->cl_count));
-
 	INIT_LIST_HEAD(&reaplist);
 	spin_lock(&recall_lock);
 	while (!list_empty(&clp->cl_delegations)) {
@@ -753,7 +726,10 @@ expire_client(struct nfs4_client *clp)
 				se_perclnt);
 		release_session(ses);
 	}
-	put_nfs4_client(clp);
+	nfsd4_set_callback_client(clp, NULL);
+	if (clp->cl_cb_conn.cb_xprt)
+		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
+	free_client(clp);
 }
 
 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
@@ -839,8 +815,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
 	}
 
 	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
-	atomic_set(&clp->cl_count, 1);
-	atomic_set(&clp->cl_cb_conn.cb_set, 0);
+	atomic_set(&clp->cl_cb_set, 0);
 	INIT_LIST_HEAD(&clp->cl_idhash);
 	INIT_LIST_HEAD(&clp->cl_strhash);
 	INIT_LIST_HEAD(&clp->cl_openowners);
@@ -1327,15 +1302,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 		cs_slot->sl_seqid++; /* from 0 to 1 */
 		move_to_confirmed(unconf);
 
-		/*
-		 * We do not support RDMA or persistent sessions
-		 */
-		cr_ses->flags &= ~SESSION4_PERSIST;
-		cr_ses->flags &= ~SESSION4_RDMA;
-
 		if (cr_ses->flags & SESSION4_BACK_CHAN) {
-			unconf->cl_cb_xprt = rqstp->rq_xprt;
-			svc_xprt_get(unconf->cl_cb_xprt);
+			unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt;
+			svc_xprt_get(rqstp->rq_xprt);
 			rpc_copy_addr(
 				(struct sockaddr *)&unconf->cl_cb_conn.cb_addr,
 				sa);
@@ -1344,7 +1313,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 				cstate->minorversion;
 			unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
 			unconf->cl_cb_seq_nr = 1;
-			nfsd4_probe_callback(unconf);
+			nfsd4_probe_callback(unconf, &unconf->cl_cb_conn);
 		}
 		conf = unconf;
 	} else {
@@ -1352,6 +1321,12 @@ nfsd4_create_session(struct svc_rqst *rqstp,
 		goto out;
 	}
 
+	/*
+	 * We do not support RDMA or persistent sessions
+	 */
+	cr_ses->flags &= ~SESSION4_PERSIST;
+	cr_ses->flags &= ~SESSION4_RDMA;
+
 	status = alloc_init_session(rqstp, conf, cr_ses);
 	if (status)
 		goto out;
@@ -1369,6 +1344,14 @@ out:
 	return status;
 }
 
+static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
+{
+	struct nfsd4_compoundres *resp = rqstp->rq_resp;
+	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+
+	return argp->opcnt == resp->opcnt;
+}
+
 __be32
 nfsd4_destroy_session(struct svc_rqst *r,
 		      struct nfsd4_compound_state *cstate,
@@ -1384,6 +1367,11 @@ nfsd4_destroy_session(struct svc_rqst *r,
 	 * - Do we need to clear any callback info from previous session?
 	 */
 
+	if (!memcmp(&sessionid->sessionid, &cstate->session->se_sessionid,
+					sizeof(struct nfs4_sessionid))) {
+		if (!nfsd4_last_compound_op(r))
+			return nfserr_not_only_op;
+	}
 	dump_sessionid(__func__, &sessionid->sessionid);
 	spin_lock(&sessionid_lock);
 	ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
@@ -1396,7 +1384,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
 	spin_unlock(&sessionid_lock);
 
 	/* wait for callbacks */
-	shutdown_callback_client(ses->se_client);
+	nfsd4_set_callback_client(ses->se_client, NULL);
 	nfsd4_put_session(ses);
 	status = nfs_ok;
 out:
@@ -1456,11 +1444,10 @@ nfsd4_sequence(struct svc_rqst *rqstp,
 	cstate->slot = slot;
 	cstate->session = session;
 
-	/* Hold a session reference until done processing the compound:
-	 * nfsd4_put_session called only if the cstate slot is set.
-	 */
-	nfsd4_get_session(session);
 out:
+	/* Hold a session reference until done processing the compound. */
+	if (cstate->session)
+		nfsd4_get_session(cstate->session);
 	spin_unlock(&sessionid_lock);
 	/* Renew the clientid on success and on replay */
 	if (cstate->session) {
@@ -1631,9 +1618,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 		if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
 			status = nfserr_clid_inuse;
 		else {
-			/* XXX: We just turn off callbacks until we can handle
-			 * change request correctly. */
-			atomic_set(&conf->cl_cb_conn.cb_set, 0);
+			atomic_set(&conf->cl_cb_set, 0);
+			nfsd4_probe_callback(conf, &unconf->cl_cb_conn);
 			expire_client(unconf);
 			status = nfs_ok;
 
@@ -1667,7 +1653,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
 			}
 			move_to_confirmed(unconf);
 			conf = unconf;
-			nfsd4_probe_callback(conf);
+			nfsd4_probe_callback(conf, &conf->cl_cb_conn);
 			status = nfs_ok;
 		}
 	} else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
@@ -2028,7 +2014,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
 	 * lock) we know the server hasn't removed the lease yet, we know
 	 * it's safe to take a reference: */
 	atomic_inc(&dp->dl_count);
-	atomic_inc(&dp->dl_client->cl_count);
 
 	spin_lock(&recall_lock);
 	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
@@ -2347,7 +2332,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
 {
 	struct nfs4_delegation *dp;
 	struct nfs4_stateowner *sop = stp->st_stateowner;
-	struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
+	int cb_up = atomic_read(&sop->so_client->cl_cb_set);
 	struct file_lock fl, *flp = &fl;
 	int status, flag = 0;
 
@@ -2355,7 +2340,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
 	open->op_recall = 0;
 	switch (open->op_claim_type) {
 		case NFS4_OPEN_CLAIM_PREVIOUS:
-			if (!atomic_read(&cb->cb_set))
+			if (!cb_up)
 				open->op_recall = 1;
 			flag = open->op_delegate_type;
 			if (flag == NFS4_OPEN_DELEGATE_NONE)
@@ -2366,7 +2351,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
 			 * had the chance to reclaim theirs.... */
 			if (locks_in_grace())
 				goto out;
-			if (!atomic_read(&cb->cb_set) || !sop->so_confirmed)
+			if (!cb_up || !sop->so_confirmed)
 				goto out;
 			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
 				flag = NFS4_OPEN_DELEGATE_WRITE;
@@ -2537,7 +2522,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 	renew_client(clp);
 	status = nfserr_cb_path_down;
 	if (!list_empty(&clp->cl_delegations)
-			&& !atomic_read(&clp->cl_cb_conn.cb_set))
+			&& !atomic_read(&clp->cl_cb_set))
 		goto out;
 	status = nfs_ok;
 out:
@@ -2554,6 +2539,12 @@ nfsd4_end_grace(void)
 	dprintk("NFSD: end of grace period\n");
 	nfsd4_recdir_purge_old();
 	locks_end_grace(&nfsd4_manager);
+	/*
+	 * Now that every NFSv4 client has had the chance to recover and
+	 * to see the (possibly new, possibly shorter) lease time, we
+	 * can safely set the next grace time to the current lease time:
+	 */
+	nfsd4_grace = nfsd4_lease;
 }
 
 static time_t
@@ -2563,9 +2554,9 @@ nfs4_laundromat(void)
 	struct nfs4_stateowner *sop;
 	struct nfs4_delegation *dp;
 	struct list_head *pos, *next, reaplist;
-	time_t cutoff = get_seconds() - NFSD_LEASE_TIME;
-	time_t t, clientid_val = NFSD_LEASE_TIME;
-	time_t u, test_val = NFSD_LEASE_TIME;
+	time_t cutoff = get_seconds() - nfsd4_lease;
+	time_t t, clientid_val = nfsd4_lease;
+	time_t u, test_val = nfsd4_lease;
 
 	nfs4_lock_state();
 
@@ -2605,7 +2596,7 @@ nfs4_laundromat(void)
 		list_del_init(&dp->dl_recall_lru);
 		unhash_delegation(dp);
 	}
-	test_val = NFSD_LEASE_TIME;
+	test_val = nfsd4_lease;
 	list_for_each_safe(pos, next, &close_lru) {
 		sop = list_entry(pos, struct nfs4_stateowner, so_close_lru);
 		if (time_after((unsigned long)sop->so_time, (unsigned long)cutoff)) {
@@ -2675,7 +2666,7 @@ EXPIRED_STATEID(stateid_t *stateid)
 {
 	if (time_before((unsigned long)boot_time,
 			((unsigned long)stateid->si_boot)) &&
-	    time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
+	    time_before((unsigned long)(stateid->si_boot + nfsd4_lease), get_seconds())) {
 		dprintk("NFSD: expired stateid " STATEID_FMT "!\n",
 			STATEID_VAL(stateid));
 		return 1;
@@ -3976,12 +3967,6 @@ nfsd4_load_reboot_recovery_data(void)
 		printk("NFSD: Failure reading reboot recovery data\n");
 }
 
-unsigned long
-get_nfs4_grace_period(void)
-{
-	return max(user_lease_time, lease_time) * HZ;
-}
-
 /*
  * Since the lifetime of a delegation isn't limited to that of an open, a
  * client may quite reasonably hang on to a delegation as long as it has
@@ -4008,20 +3993,27 @@ set_max_delegations(void)
 static int
 __nfs4_state_start(void)
 {
-	unsigned long grace_time;
+	int ret;
 
 	boot_time = get_seconds();
-	grace_time = get_nfs4_grace_period();
-	lease_time = user_lease_time;
 	locks_start_grace(&nfsd4_manager);
 	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
-	       grace_time/HZ);
+	       nfsd4_grace);
+	ret = set_callback_cred();
+	if (ret)
+		return -ENOMEM;
 	laundry_wq = create_singlethread_workqueue("nfsd4");
 	if (laundry_wq == NULL)
 		return -ENOMEM;
-	queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
+	ret = nfsd4_create_callback_queue();
+	if (ret)
+		goto out_free_laundry;
+	queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
 	set_max_delegations();
-	return set_callback_cred();
+	return 0;
+out_free_laundry:
+	destroy_workqueue(laundry_wq);
+	return ret;
 }
 
 int
@@ -4039,12 +4031,6 @@ nfs4_state_start(void)
 	return 0;
 }
 
-time_t
-nfs4_lease_time(void)
-{
-	return lease_time;
-}
-
 static void
 __nfs4_state_shutdown(void)
 {
@@ -4089,6 +4075,7 @@ nfs4_state_shutdown(void)
 	nfs4_lock_state();
 	nfs4_release_reclaim();
 	__nfs4_state_shutdown();
+	nfsd4_destroy_callback_queue();
 	nfs4_unlock_state();
 }
 
@@ -4128,21 +4115,3 @@ nfs4_recoverydir(void)
 {
 	return user_recovery_dirname;
 }
-
-/*
- * Called when leasetime is changed.
- *
- * The only way the protocol gives us to handle on-the-fly lease changes is to
- * simulate a reboot.  Instead of doing that, we just wait till the next time
- * we start to register any changes in lease time.  If the administrator
- * really wants to change the lease time *now*, they can go ahead and bring
- * nfsd down and then back up again after changing the lease time.
- *
- * user_lease_time is protected by nfsd_mutex since it's only really accessed
- * when nfsd is starting
- */
-void
-nfs4_reset_lease(time_t leasetime)
-{
-	user_lease_time = leasetime;
-}
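
One detail worth noting in __nfs4_state_start() above: the function now unwinds on failure, destroying the laundromat workqueue if nfsd4_create_callback_queue() fails after it was created. A minimal sketch of that acquire-then-unwind shape, with invented resource names (not the nfsd code):

#include <stdio.h>
#include <stdlib.h>

static int setup_a(void) { puts("A up"); return 0; }
static void teardown_a(void) { puts("A down"); }
static int setup_b(void) { return -1; /* simulated failure */ }

static int start(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		return ret;
	ret = setup_b();
	if (ret)
		goto out_teardown_a;	/* like out_free_laundry above */
	return 0;
out_teardown_a:
	teardown_a();
	return ret;
}

int main(void)
{
	return start() ? EXIT_FAILURE : EXIT_SUCCESS;
}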
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 34ccf815ea8a..5c2de471329a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1900,7 +1900,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
 	if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
 		if ((buflen -= 4) < 0)
 			goto out_resource;
-		WRITE32(NFSD_LEASE_TIME);
+		WRITE32(nfsd4_lease);
 	}
 	if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
 		if ((buflen -= 4) < 0)
@@ -3307,11 +3307,13 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
 	iov = &rqstp->rq_res.head[0];
 	iov->iov_len = ((char*)resp->p) - (char*)iov->iov_base;
 	BUG_ON(iov->iov_len > PAGE_SIZE);
-	if (nfsd4_has_session(cs) && cs->status != nfserr_replay_cache) {
-		nfsd4_store_cache_entry(resp);
-		dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
-		resp->cstate.slot->sl_inuse = false;
-		nfsd4_put_session(resp->cstate.session);
+	if (nfsd4_has_session(cs)) {
+		if (cs->status != nfserr_replay_cache) {
+			nfsd4_store_cache_entry(resp);
+			dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__);
+			cs->slot->sl_inuse = false;
+		}
+		nfsd4_put_session(cs->session);
 	}
 	return 1;
 }
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index e3591073098f..bc3194ea01f5 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -46,6 +46,7 @@ enum {
 	 */
 #ifdef CONFIG_NFSD_V4
 	NFSD_Leasetime,
+	NFSD_Gracetime,
 	NFSD_RecoveryDir,
 #endif
 };
@@ -70,6 +71,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size);
 static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
 #ifdef CONFIG_NFSD_V4
 static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size);
 static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
 #endif
 
@@ -91,6 +93,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
 	[NFSD_MaxBlkSize] = write_maxblksize,
#ifdef CONFIG_NFSD_V4
 	[NFSD_Leasetime] = write_leasetime,
+	[NFSD_Gracetime] = write_gracetime,
 	[NFSD_RecoveryDir] = write_recoverydir,
 #endif
 };
@@ -1204,29 +1207,45 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
 }
 
 #ifdef CONFIG_NFSD_V4
-extern time_t nfs4_leasetime(void);
-
-static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
+static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
 {
-	/* if size > 10 seconds, call
-	 * nfs4_reset_lease() then write out the new lease (seconds) as reply
-	 */
 	char *mesg = buf;
-	int rv, lease;
+	int rv, i;
 
 	if (size > 0) {
 		if (nfsd_serv)
 			return -EBUSY;
-		rv = get_int(&mesg, &lease);
+		rv = get_int(&mesg, &i);
 		if (rv)
 			return rv;
-		if (lease < 10 || lease > 3600)
+		/*
+		 * Some sanity checking.  We don't have a reason for
+		 * these particular numbers, but problems with the
+		 * extremes are:
+		 *	- Too short: the briefest network outage may
+		 *	  cause clients to lose all their locks.  Also,
+		 *	  the frequent polling may be wasteful.
+		 *	- Too long: do you really want reboot recovery
+		 *	  to take more than an hour?  Or to make other
+		 *	  clients wait an hour before being able to
+		 *	  revoke a dead client's locks?
+		 */
+		if (i < 10 || i > 3600)
 			return -EINVAL;
-		nfs4_reset_lease(lease);
+		*time = i;
 	}
 
-	return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
-							nfs4_lease_time());
+	return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n", *time);
+}
+
+static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size, time_t *time)
+{
+	ssize_t rv;
+
+	mutex_lock(&nfsd_mutex);
+	rv = __nfsd4_write_time(file, buf, size, time);
+	mutex_unlock(&nfsd_mutex);
+	return rv;
 }
 
 /**
@@ -1252,12 +1271,22 @@ static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
  */
 static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
 {
-	ssize_t rv;
+	return nfsd4_write_time(file, buf, size, &nfsd4_lease);
+}
 
-	mutex_lock(&nfsd_mutex);
-	rv = __write_leasetime(file, buf, size);
-	mutex_unlock(&nfsd_mutex);
-	return rv;
+/**
+ * write_gracetime - Set or report current NFSv4 grace period time
+ *
+ * As above, but sets the time of the NFSv4 grace period.
+ *
+ * Note this should never be set to less than the *previous*
+ * lease-period time, but we don't try to enforce this.  (In the common
+ * case (a new boot), we don't know what the previous lease time was
+ * anyway.)
+ */
+static ssize_t write_gracetime(struct file *file, char *buf, size_t size)
+{
+	return nfsd4_write_time(file, buf, size, &nfsd4_grace);
 }
 
 extern char *nfs4_recoverydir(void);
@@ -1351,6 +1380,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
 		[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
 #ifdef CONFIG_NFSD_V4
 		[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
+		[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
 		[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
 #endif
 		/* last one */ {""}
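
write_leasetime() and the new write_gracetime() are now thin wrappers around a single helper that parses an integer, rejects anything outside the 10..3600 second range, stores it through a pointer, and echoes the current value back. A standalone sketch of that parse/validate/report shape, with invented names and none of the nfsd locking:

#include <stdio.h>
#include <stdlib.h>

static long lease_secs = 90;
static long grace_secs = 90;

/* Returns 0 and writes the current setting into out; -1 on a bad value. */
static int write_time(const char *buf, long *time, char *out, size_t outlen)
{
	if (buf && *buf) {
		char *end;
		long v = strtol(buf, &end, 10);

		if (end == buf || v < 10 || v > 3600)
			return -1;	/* same 10..3600 sanity range */
		*time = v;
	}
	snprintf(out, outlen, "%ld\n", *time);
	return 0;
}

int main(void)
{
	char reply[32];

	if (write_time("120", &lease_secs, reply, sizeof(reply)) == 0)
		fputs(reply, stdout);	/* prints 120 */
	if (write_time("", &grace_secs, reply, sizeof(reply)) == 0)
		fputs(reply, stdout);	/* unchanged: prints 90 */
	return 0;
}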
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index e942a1aaac92..72377761270e 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -82,7 +82,6 @@ int nfs4_state_init(void);
 void nfsd4_free_slabs(void);
 int nfs4_state_start(void);
 void nfs4_state_shutdown(void);
-time_t nfs4_lease_time(void);
 void nfs4_reset_lease(time_t leasetime);
 int nfs4_reset_recoverydir(char *recdir);
 #else
@@ -90,7 +89,6 @@ static inline int nfs4_state_init(void) { return 0; }
 static inline void nfsd4_free_slabs(void) { }
 static inline int nfs4_state_start(void) { return 0; }
 static inline void nfs4_state_shutdown(void) { }
-static inline time_t nfs4_lease_time(void) { return 0; }
 static inline void nfs4_reset_lease(time_t leasetime) { }
 static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
 #endif
@@ -229,6 +227,9 @@ extern struct timeval nfssvc_boot;
 
 #ifdef CONFIG_NFSD_V4
 
+extern time_t nfsd4_lease;
+extern time_t nfsd4_grace;
+
 /* before processing a COMPOUND operation, we have to check that there
  * is enough space in the buffer for XDR encode to succeed.  otherwise,
  * we might process an operation with side effects, and be unable to
@@ -247,7 +248,6 @@ extern struct timeval nfssvc_boot;
 #define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
 #define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
 
-#define NFSD_LEASE_TIME (nfs4_lease_time())
 #define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
 
 /*
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index fefeae27f25e..98836fd87f69 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -70,6 +70,16 @@ struct nfsd4_cb_sequence { | |||
70 | struct nfs4_client *cbs_clp; | 70 | struct nfs4_client *cbs_clp; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | struct nfs4_rpc_args { | ||
74 | void *args_op; | ||
75 | struct nfsd4_cb_sequence args_seq; | ||
76 | }; | ||
77 | |||
78 | struct nfsd4_callback { | ||
79 | struct nfs4_rpc_args cb_args; | ||
80 | struct work_struct cb_work; | ||
81 | }; | ||
82 | |||
73 | struct nfs4_delegation { | 83 | struct nfs4_delegation { |
74 | struct list_head dl_perfile; | 84 | struct list_head dl_perfile; |
75 | struct list_head dl_perclnt; | 85 | struct list_head dl_perclnt; |
@@ -86,6 +96,7 @@ struct nfs4_delegation { | |||
86 | stateid_t dl_stateid; | 96 | stateid_t dl_stateid; |
87 | struct knfsd_fh dl_fh; | 97 | struct knfsd_fh dl_fh; |
88 | int dl_retries; | 98 | int dl_retries; |
99 | struct nfsd4_callback dl_recall; | ||
89 | }; | 100 | }; |
90 | 101 | ||
91 | /* client delegation callback info */ | 102 | /* client delegation callback info */ |
@@ -96,9 +107,7 @@ struct nfs4_cb_conn { | |||
96 | u32 cb_prog; | 107 | u32 cb_prog; |
97 | u32 cb_minorversion; | 108 | u32 cb_minorversion; |
98 | u32 cb_ident; /* minorversion 0 only */ | 109 | u32 cb_ident; /* minorversion 0 only */ |
99 | /* RPC client info */ | 110 | struct svc_xprt *cb_xprt; /* minorversion 1 only */ |
100 | atomic_t cb_set; /* successful CB_NULL call */ | ||
101 | struct rpc_clnt * cb_client; | ||
102 | }; | 111 | }; |
103 | 112 | ||
104 | /* Maximum number of slots per session. 160 is useful for long haul TCP */ | 113 | /* Maximum number of slots per session. 160 is useful for long haul TCP */ |
@@ -212,10 +221,13 @@ struct nfs4_client { | |||
212 | struct svc_cred cl_cred; /* setclientid principal */ | 221 | struct svc_cred cl_cred; /* setclientid principal */ |
213 | clientid_t cl_clientid; /* generated by server */ | 222 | clientid_t cl_clientid; /* generated by server */ |
214 | nfs4_verifier cl_confirm; /* generated by server */ | 223 | nfs4_verifier cl_confirm; /* generated by server */ |
215 | struct nfs4_cb_conn cl_cb_conn; /* callback info */ | ||
216 | atomic_t cl_count; /* ref count */ | ||
217 | u32 cl_firststate; /* recovery dir creation */ | 224 | u32 cl_firststate; /* recovery dir creation */ |
218 | 225 | ||
226 | /* for v4.0 and v4.1 callbacks: */ | ||
227 | struct nfs4_cb_conn cl_cb_conn; | ||
228 | struct rpc_clnt *cl_cb_client; | ||
229 | atomic_t cl_cb_set; | ||
230 | |||
219 | /* for nfs41 */ | 231 | /* for nfs41 */ |
220 | struct list_head cl_sessions; | 232 | struct list_head cl_sessions; |
221 | struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ | 233 | struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ |
@@ -226,7 +238,6 @@ struct nfs4_client { | |||
226 | /* We currently support a single back channel with a single slot */ | 238 | /* We currently support a single back channel with a single slot */ |
227 | unsigned long cl_cb_slot_busy; | 239 | unsigned long cl_cb_slot_busy; |
228 | u32 cl_cb_seq_nr; | 240 | u32 cl_cb_seq_nr; |
229 | struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ | ||
230 | struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ | 241 | struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ |
231 | /* wait here for slots */ | 242 | /* wait here for slots */ |
232 | }; | 243 | }; |
@@ -377,11 +388,14 @@ extern void nfs4_lock_state(void); | |||
377 | extern void nfs4_unlock_state(void); | 388 | extern void nfs4_unlock_state(void); |
378 | extern int nfs4_in_grace(void); | 389 | extern int nfs4_in_grace(void); |
379 | extern __be32 nfs4_check_open_reclaim(clientid_t *clid); | 390 | extern __be32 nfs4_check_open_reclaim(clientid_t *clid); |
380 | extern void put_nfs4_client(struct nfs4_client *clp); | ||
381 | extern void nfs4_free_stateowner(struct kref *kref); | 391 | extern void nfs4_free_stateowner(struct kref *kref); |
382 | extern int set_callback_cred(void); | 392 | extern int set_callback_cred(void); |
383 | extern void nfsd4_probe_callback(struct nfs4_client *clp); | 393 | extern void nfsd4_probe_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); |
394 | extern void nfsd4_do_callback_rpc(struct work_struct *); | ||
384 | extern void nfsd4_cb_recall(struct nfs4_delegation *dp); | 395 | extern void nfsd4_cb_recall(struct nfs4_delegation *dp); |
396 | extern int nfsd4_create_callback_queue(void); | ||
397 | extern void nfsd4_destroy_callback_queue(void); | ||
398 | extern void nfsd4_set_callback_client(struct nfs4_client *, struct rpc_clnt *); | ||
385 | extern void nfs4_put_delegation(struct nfs4_delegation *dp); | 399 | extern void nfs4_put_delegation(struct nfs4_delegation *dp); |
386 | extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); | 400 | extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); |
387 | extern void nfsd4_init_recdir(char *recdir_name); | 401 | extern void nfsd4_init_recdir(char *recdir_name); |
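Taken together, the state.h hunks above split the callback plumbing out of struct nfs4_cb_conn into a per-client RPC client (cl_cb_client, cl_cb_set) plus a generic struct nfsd4_callback that embeds a work_struct, so a delegation recall becomes a deferred work item rather than an inline RPC. A minimal sketch of how that embedding could be driven, using only the declarations added above (nfsd4_do_callback_rpc(), nfsd4_create_callback_queue(), the dl_recall field); the workqueue variable, its name, and the helper nfsd4_init_recall() are assumptions for illustration, not the actual fs/nfsd/nfs4callback.c code:

	/* sketch only: assumed workqueue backing nfsd4_create_callback_queue() */
	static struct workqueue_struct *callback_wq;

	int nfsd4_create_callback_queue(void)
	{
		callback_wq = create_singlethread_workqueue("nfsd4_callbacks");
		return callback_wq ? 0 : -ENOMEM;
	}

	void nfsd4_destroy_callback_queue(void)
	{
		destroy_workqueue(callback_wq);
	}

	/* hypothetical helper: arm the per-delegation callback work item */
	static void nfsd4_init_recall(struct nfs4_delegation *dp)
	{
		INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
	}

	void nfsd4_cb_recall(struct nfs4_delegation *dp)
	{
		/* run the CB_RECALL RPC from process context */
		queue_work(callback_wq, &dp->dl_recall.cb_work);
	}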
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6dd5f1970e01..23c06f77f4ca 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -724,7 +724,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
724 | struct inode *inode; | 724 | struct inode *inode; |
725 | int flags = O_RDONLY|O_LARGEFILE; | 725 | int flags = O_RDONLY|O_LARGEFILE; |
726 | __be32 err; | 726 | __be32 err; |
727 | int host_err; | 727 | int host_err = 0; |
728 | 728 | ||
729 | validate_process_creds(); | 729 | validate_process_creds(); |
730 | 730 | ||
@@ -761,7 +761,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
761 | * Check to see if there are any leases on this file. | 761 | * Check to see if there are any leases on this file. |
762 | * This may block while leases are broken. | 762 | * This may block while leases are broken. |
763 | */ | 763 | */ |
764 | host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); | 764 | if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) |
765 | host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); | ||
765 | if (host_err == -EWOULDBLOCK) | 766 | if (host_err == -EWOULDBLOCK) |
766 | host_err = -ETIMEDOUT; | 767 | host_err = -ETIMEDOUT; |
767 | if (host_err) /* NOMEM or WOULDBLOCK */ | 768 | if (host_err) /* NOMEM or WOULDBLOCK */ |
@@ -1169,7 +1170,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
1169 | goto out; | 1170 | goto out; |
1170 | } | 1171 | } |
1171 | 1172 | ||
1172 | err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file); | 1173 | err = nfsd_open(rqstp, fhp, S_IFREG, |
1174 | NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file); | ||
1173 | if (err) | 1175 | if (err) |
1174 | goto out; | 1176 | goto out; |
1175 | if (EX_ISSYNC(fhp->fh_export)) { | 1177 | if (EX_ISSYNC(fhp->fh_export)) { |
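The two vfs.c hunks work together: nfsd_open() now skips break_lease() when the caller passes the new NFSD_MAY_NOT_BREAK_LEASE bit (hence host_err must be pre-initialized to 0, since the call can now be skipped), and nfsd_commit() passes that bit so committing data does not trigger a lease/delegation recall. Condensed into a small helper purely for illustration (the helper name is invented; the real code keeps this logic inline in nfsd_open()):

	/* sketch: the lease-break decision made in nfsd_open() above */
	static int nfsd_maybe_break_lease(struct inode *inode, int access)
	{
		if (access & NFSD_MAY_NOT_BREAK_LEASE)
			return 0;	/* e.g. COMMIT: leave delegations alone */
		return break_lease(inode,
			O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
	}

A caller such as nfsd_commit() then requests NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, exactly as in the hunk above, and any -EWOULDBLOCK result is still mapped to -ETIMEDOUT by the surrounding code.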
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index 4b1de0a9ea75..217a62c2a357 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #define NFSD_MAY_OWNER_OVERRIDE 64 | 20 | #define NFSD_MAY_OWNER_OVERRIDE 64 |
21 | #define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ | 21 | #define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ |
22 | #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 | 22 | #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 |
23 | #define NFSD_MAY_NOT_BREAK_LEASE 512 | ||
23 | 24 | ||
24 | #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) | 25 | #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) |
25 | #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) | 26 | #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) |
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index efa337739534..c28958ec216c 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h | |||
@@ -513,9 +513,8 @@ extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); | |||
513 | extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, | 513 | extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, |
514 | struct nfsd4_sequence *seq); | 514 | struct nfsd4_sequence *seq); |
515 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, | 515 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, |
516 | struct nfsd4_compound_state *, | 516 | struct nfsd4_compound_state *, struct nfsd4_exchange_id *); |
517 | struct nfsd4_exchange_id *); | 517 | extern __be32 nfsd4_create_session(struct svc_rqst *, |
518 | extern __be32 nfsd4_create_session(struct svc_rqst *, | ||
519 | struct nfsd4_compound_state *, | 518 | struct nfsd4_compound_state *, |
520 | struct nfsd4_create_session *); | 519 | struct nfsd4_create_session *); |
521 | extern __be32 nfsd4_sequence(struct svc_rqst *, | 520 | extern __be32 nfsd4_sequence(struct svc_rqst *, |
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index 65e333afaee4..80d55bbc5365 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h | |||
@@ -40,12 +40,12 @@ struct nfs_fhbase_old { | |||
40 | * This is the new flexible, extensible style NFSv2/v3 file handle. | 40 | * This is the new flexible, extensible style NFSv2/v3 file handle. |
41 | * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000 | 41 | * by Neil Brown <neilb@cse.unsw.edu.au> - March 2000 |
42 | * | 42 | * |
43 | * The file handle is seens as a list of 4byte words. | 43 | * The file handle starts with a sequence of four-byte words. |
44 | * The first word contains a version number (1) and four descriptor bytes | 44 | * The first word contains a version number (1) and three descriptor bytes |
45 | * that tell how the remaining 3 variable length fields should be handled. | 45 | * that tell how the remaining 3 variable length fields should be handled. |
46 | * These three bytes are auth_type, fsid_type and fileid_type. | 46 | * These three bytes are auth_type, fsid_type and fileid_type. |
47 | * | 47 | * |
48 | * All 4byte values are in host-byte-order. | 48 | * All four-byte values are in host-byte-order. |
49 | * | 49 | * |
50 | * The auth_type field specifies how the filehandle can be authenticated | 50 | * The auth_type field specifies how the filehandle can be authenticated |
51 | * This might allow a file to be confirmed to be in a writable part of a | 51 | * This might allow a file to be confirmed to be in a writable part of a |
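The comment being corrected here describes the "new" filehandle layout: the first four-byte word carries a version byte (1) followed by three descriptor bytes, auth_type, fsid_type and fileid_type, and all four-byte quantities are in host byte order. As a plain-C illustration of that first word (struct and function names are invented for the example):

	#include <stdint.h>

	struct fh_first_word {
		uint8_t version;	/* 1 for this layout */
		uint8_t auth_type;
		uint8_t fsid_type;
		uint8_t fileid_type;
	};

	/* decode the leading word of a filehandle buffer */
	static struct fh_first_word fh_describe(const unsigned char *fh)
	{
		struct fh_first_word w = {
			.version     = fh[0],
			.auth_type   = fh[1],
			.fsid_type   = fh[2],
			.fileid_type = fh[3],
		};
		return w;
	}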
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 39bddba53ba1..a3f340c8b79a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -49,11 +49,17 @@ static void cache_init(struct cache_head *h) | |||
49 | h->last_refresh = now; | 49 | h->last_refresh = now; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) | ||
53 | { | ||
54 | return (h->expiry_time < get_seconds()) || | ||
55 | (detail->flush_time > h->last_refresh); | ||
56 | } | ||
57 | |||
52 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | 58 | struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, |
53 | struct cache_head *key, int hash) | 59 | struct cache_head *key, int hash) |
54 | { | 60 | { |
55 | struct cache_head **head, **hp; | 61 | struct cache_head **head, **hp; |
56 | struct cache_head *new = NULL; | 62 | struct cache_head *new = NULL, *freeme = NULL; |
57 | 63 | ||
58 | head = &detail->hash_table[hash]; | 64 | head = &detail->hash_table[hash]; |
59 | 65 | ||
@@ -62,6 +68,9 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
62 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 68 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { |
63 | struct cache_head *tmp = *hp; | 69 | struct cache_head *tmp = *hp; |
64 | if (detail->match(tmp, key)) { | 70 | if (detail->match(tmp, key)) { |
71 | if (cache_is_expired(detail, tmp)) | ||
72 | /* This entry is expired, we will discard it. */ | ||
73 | break; | ||
65 | cache_get(tmp); | 74 | cache_get(tmp); |
66 | read_unlock(&detail->hash_lock); | 75 | read_unlock(&detail->hash_lock); |
67 | return tmp; | 76 | return tmp; |
@@ -86,6 +95,13 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
86 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { | 95 | for (hp=head; *hp != NULL ; hp = &(*hp)->next) { |
87 | struct cache_head *tmp = *hp; | 96 | struct cache_head *tmp = *hp; |
88 | if (detail->match(tmp, key)) { | 97 | if (detail->match(tmp, key)) { |
98 | if (cache_is_expired(detail, tmp)) { | ||
99 | *hp = tmp->next; | ||
100 | tmp->next = NULL; | ||
101 | detail->entries --; | ||
102 | freeme = tmp; | ||
103 | break; | ||
104 | } | ||
89 | cache_get(tmp); | 105 | cache_get(tmp); |
90 | write_unlock(&detail->hash_lock); | 106 | write_unlock(&detail->hash_lock); |
91 | cache_put(new, detail); | 107 | cache_put(new, detail); |
@@ -98,6 +114,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | |||
98 | cache_get(new); | 114 | cache_get(new); |
99 | write_unlock(&detail->hash_lock); | 115 | write_unlock(&detail->hash_lock); |
100 | 116 | ||
117 | if (freeme) | ||
118 | cache_put(freeme, detail); | ||
101 | return new; | 119 | return new; |
102 | } | 120 | } |
103 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); | 121 | EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); |
@@ -183,10 +201,7 @@ static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) | |||
183 | 201 | ||
184 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) | 202 | static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) |
185 | { | 203 | { |
186 | if (!test_bit(CACHE_VALID, &h->flags) || | 204 | if (!test_bit(CACHE_VALID, &h->flags)) |
187 | h->expiry_time < get_seconds()) | ||
188 | return -EAGAIN; | ||
189 | else if (detail->flush_time > h->last_refresh) | ||
190 | return -EAGAIN; | 205 | return -EAGAIN; |
191 | else { | 206 | else { |
192 | /* entry is valid */ | 207 | /* entry is valid */ |
@@ -397,31 +412,27 @@ static int cache_clean(void) | |||
397 | /* Ok, now to clean this strand */ | 412 | /* Ok, now to clean this strand */ |
398 | 413 | ||
399 | cp = & current_detail->hash_table[current_index]; | 414 | cp = & current_detail->hash_table[current_index]; |
400 | ch = *cp; | 415 | for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { |
401 | for (; ch; cp= & ch->next, ch= *cp) { | ||
402 | if (current_detail->nextcheck > ch->expiry_time) | 416 | if (current_detail->nextcheck > ch->expiry_time) |
403 | current_detail->nextcheck = ch->expiry_time+1; | 417 | current_detail->nextcheck = ch->expiry_time+1; |
404 | if (ch->expiry_time >= get_seconds() && | 418 | if (!cache_is_expired(current_detail, ch)) |
405 | ch->last_refresh >= current_detail->flush_time) | ||
406 | continue; | 419 | continue; |
407 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) | ||
408 | cache_dequeue(current_detail, ch); | ||
409 | 420 | ||
410 | if (atomic_read(&ch->ref.refcount) == 1) | ||
411 | break; | ||
412 | } | ||
413 | if (ch) { | ||
414 | *cp = ch->next; | 421 | *cp = ch->next; |
415 | ch->next = NULL; | 422 | ch->next = NULL; |
416 | current_detail->entries--; | 423 | current_detail->entries--; |
417 | rv = 1; | 424 | rv = 1; |
425 | break; | ||
418 | } | 426 | } |
427 | |||
419 | write_unlock(¤t_detail->hash_lock); | 428 | write_unlock(¤t_detail->hash_lock); |
420 | d = current_detail; | 429 | d = current_detail; |
421 | if (!ch) | 430 | if (!ch) |
422 | current_index ++; | 431 | current_index ++; |
423 | spin_unlock(&cache_list_lock); | 432 | spin_unlock(&cache_list_lock); |
424 | if (ch) { | 433 | if (ch) { |
434 | if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) | ||
435 | cache_dequeue(current_detail, ch); | ||
425 | cache_revisit_request(ch); | 436 | cache_revisit_request(ch); |
426 | cache_put(ch, d); | 437 | cache_put(ch, d); |
427 | } | 438 | } |
@@ -1233,8 +1244,10 @@ static int content_open(struct inode *inode, struct file *file, | |||
1233 | if (!cd || !try_module_get(cd->owner)) | 1244 | if (!cd || !try_module_get(cd->owner)) |
1234 | return -EACCES; | 1245 | return -EACCES; |
1235 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); | 1246 | han = __seq_open_private(file, &cache_content_op, sizeof(*han)); |
1236 | if (han == NULL) | 1247 | if (han == NULL) { |
1248 | module_put(cd->owner); | ||
1237 | return -ENOMEM; | 1249 | return -ENOMEM; |
1250 | } | ||
1238 | 1251 | ||
1239 | han->cd = cd; | 1252 | han->cd = cd; |
1240 | return 0; | 1253 | return 0; |
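Two distinct fixes in cache.c: content_open() now drops the module reference it took when the seq_file allocation fails, and expiry handling is consolidated into cache_is_expired(), which sunrpc_cache_lookup() and cache_clean() use to discard an entry either when its own expiry_time has passed or when the whole cache was flushed after the entry was last refreshed. A self-contained, user-space illustration of that predicate (time(NULL) stands in for get_seconds(); the names are for the example only):

	#include <stdio.h>
	#include <time.h>

	struct entry { time_t expiry_time; time_t last_refresh; };
	struct cache { time_t flush_time; };

	static int is_expired(const struct cache *c, const struct entry *e)
	{
		return e->expiry_time < time(NULL) ||	/* entry's own lifetime is over */
		       c->flush_time > e->last_refresh;	/* cache flushed since last refresh */
	}

	int main(void)
	{
		time_t now = time(NULL);
		struct cache c = { .flush_time = now };		/* "flush everything up to now" */
		struct entry e = { .expiry_time = now + 60,	/* still within its lifetime...      */
				   .last_refresh = now - 10 };	/* ...but refreshed before the flush */
		printf("expired: %d\n", is_expired(&c, &e));	/* prints 1 */
		return 0;
	}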
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 061b2e0f9118..cbc084939dd8 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -744,8 +744,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
744 | if (rqstp->rq_deferred) { | 744 | if (rqstp->rq_deferred) { |
745 | svc_xprt_received(xprt); | 745 | svc_xprt_received(xprt); |
746 | len = svc_deferred_recv(rqstp); | 746 | len = svc_deferred_recv(rqstp); |
747 | } else | 747 | } else { |
748 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); | 748 | len = xprt->xpt_ops->xpo_recvfrom(rqstp); |
749 | svc_xprt_received(xprt); | ||
750 | } | ||
749 | dprintk("svc: got len=%d\n", len); | 751 | dprintk("svc: got len=%d\n", len); |
750 | } | 752 | } |
751 | 753 | ||
@@ -893,12 +895,12 @@ void svc_delete_xprt(struct svc_xprt *xprt) | |||
893 | */ | 895 | */ |
894 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) | 896 | if (test_bit(XPT_TEMP, &xprt->xpt_flags)) |
895 | serv->sv_tmpcnt--; | 897 | serv->sv_tmpcnt--; |
898 | spin_unlock_bh(&serv->sv_lock); | ||
896 | 899 | ||
897 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) | 900 | while ((dr = svc_deferred_dequeue(xprt)) != NULL) |
898 | kfree(dr); | 901 | kfree(dr); |
899 | 902 | ||
900 | svc_xprt_put(xprt); | 903 | svc_xprt_put(xprt); |
901 | spin_unlock_bh(&serv->sv_lock); | ||
902 | } | 904 | } |
903 | 905 | ||
904 | void svc_close_xprt(struct svc_xprt *xprt) | 906 | void svc_close_xprt(struct svc_xprt *xprt) |
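The svc_xprt.c hunks make two changes. First, svc_recv() now calls svc_xprt_received() itself right after xpo_recvfrom() returns, so the transport is handed back to the server pool in one place; that is why the svcsock.c and svc_rdma hunks that follow simply delete their scattered svc_xprt_received() calls. Second, svc_delete_xprt() releases sv_lock before draining deferred requests and dropping its reference, rather than calling svc_xprt_put() with the spinlock still held. The receive path, condensed from the hunk above with an explanatory comment added:

	if (rqstp->rq_deferred) {
		svc_xprt_received(xprt);
		len = svc_deferred_recv(rqstp);
	} else {
		len = xprt->xpt_ops->xpo_recvfrom(rqstp);
		/* one central "done receiving" notification for every
		 * transport type, instead of one inside each xpo_recvfrom() */
		svc_xprt_received(xprt);
	}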
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index a29f259204e6..a33892733643 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -547,7 +547,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
547 | dprintk("svc: recvfrom returned error %d\n", -err); | 547 | dprintk("svc: recvfrom returned error %d\n", -err); |
548 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); | 548 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); |
549 | } | 549 | } |
550 | svc_xprt_received(&svsk->sk_xprt); | ||
551 | return -EAGAIN; | 550 | return -EAGAIN; |
552 | } | 551 | } |
553 | len = svc_addr_len(svc_addr(rqstp)); | 552 | len = svc_addr_len(svc_addr(rqstp)); |
@@ -562,11 +561,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp) | |||
562 | svsk->sk_sk->sk_stamp = skb->tstamp; | 561 | svsk->sk_sk->sk_stamp = skb->tstamp; |
563 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ | 562 | set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ |
564 | 563 | ||
565 | /* | ||
566 | * Maybe more packets - kick another thread ASAP. | ||
567 | */ | ||
568 | svc_xprt_received(&svsk->sk_xprt); | ||
569 | |||
570 | len = skb->len - sizeof(struct udphdr); | 564 | len = skb->len - sizeof(struct udphdr); |
571 | rqstp->rq_arg.len = len; | 565 | rqstp->rq_arg.len = len; |
572 | 566 | ||
@@ -917,7 +911,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
917 | if (len < want) { | 911 | if (len < want) { |
918 | dprintk("svc: short recvfrom while reading record " | 912 | dprintk("svc: short recvfrom while reading record " |
919 | "length (%d of %d)\n", len, want); | 913 | "length (%d of %d)\n", len, want); |
920 | svc_xprt_received(&svsk->sk_xprt); | ||
921 | goto err_again; /* record header not complete */ | 914 | goto err_again; /* record header not complete */ |
922 | } | 915 | } |
923 | 916 | ||
@@ -953,7 +946,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
953 | if (len < svsk->sk_reclen) { | 946 | if (len < svsk->sk_reclen) { |
954 | dprintk("svc: incomplete TCP record (%d of %d)\n", | 947 | dprintk("svc: incomplete TCP record (%d of %d)\n", |
955 | len, svsk->sk_reclen); | 948 | len, svsk->sk_reclen); |
956 | svc_xprt_received(&svsk->sk_xprt); | ||
957 | goto err_again; /* record not complete */ | 949 | goto err_again; /* record not complete */ |
958 | } | 950 | } |
959 | len = svsk->sk_reclen; | 951 | len = svsk->sk_reclen; |
@@ -961,14 +953,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) | |||
961 | 953 | ||
962 | return len; | 954 | return len; |
963 | error: | 955 | error: |
964 | if (len == -EAGAIN) { | 956 | if (len == -EAGAIN) |
965 | dprintk("RPC: TCP recv_record got EAGAIN\n"); | 957 | dprintk("RPC: TCP recv_record got EAGAIN\n"); |
966 | svc_xprt_received(&svsk->sk_xprt); | ||
967 | } | ||
968 | return len; | 958 | return len; |
969 | err_delete: | 959 | err_delete: |
970 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); | 960 | set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); |
971 | svc_xprt_received(&svsk->sk_xprt); | ||
972 | err_again: | 961 | err_again: |
973 | return -EAGAIN; | 962 | return -EAGAIN; |
974 | } | 963 | } |
@@ -1110,7 +1099,6 @@ out: | |||
1110 | svsk->sk_tcplen = 0; | 1099 | svsk->sk_tcplen = 0; |
1111 | 1100 | ||
1112 | svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); | 1101 | svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); |
1113 | svc_xprt_received(&svsk->sk_xprt); | ||
1114 | if (serv->sv_stats) | 1102 | if (serv->sv_stats) |
1115 | serv->sv_stats->nettcpcnt++; | 1103 | serv->sv_stats->nettcpcnt++; |
1116 | 1104 | ||
@@ -1119,7 +1107,6 @@ out: | |||
1119 | err_again: | 1107 | err_again: |
1120 | if (len == -EAGAIN) { | 1108 | if (len == -EAGAIN) { |
1121 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); | 1109 | dprintk("RPC: TCP recvfrom got EAGAIN\n"); |
1122 | svc_xprt_received(&svsk->sk_xprt); | ||
1123 | return len; | 1110 | return len; |
1124 | } | 1111 | } |
1125 | error: | 1112 | error: |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index f92e37eb413c..0194de814933 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
566 | ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, | 566 | ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base, |
567 | rqstp->rq_arg.head[0].iov_len); | 567 | rqstp->rq_arg.head[0].iov_len); |
568 | 568 | ||
569 | svc_xprt_received(rqstp->rq_xprt); | ||
570 | return ret; | 569 | return ret; |
571 | } | 570 | } |
572 | 571 | ||
@@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) | |||
665 | rqstp->rq_arg.head[0].iov_len); | 664 | rqstp->rq_arg.head[0].iov_len); |
666 | rqstp->rq_prot = IPPROTO_MAX; | 665 | rqstp->rq_prot = IPPROTO_MAX; |
667 | svc_xprt_copy_addrs(rqstp, xprt); | 666 | svc_xprt_copy_addrs(rqstp, xprt); |
668 | svc_xprt_received(xprt); | ||
669 | return ret; | 667 | return ret; |
670 | 668 | ||
671 | close_out: | 669 | close_out: |
@@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) | |||
678 | */ | 676 | */ |
679 | set_bit(XPT_CLOSE, &xprt->xpt_flags); | 677 | set_bit(XPT_CLOSE, &xprt->xpt_flags); |
680 | defer: | 678 | defer: |
681 | svc_xprt_received(xprt); | ||
682 | return 0; | 679 | return 0; |
683 | } | 680 | } |