Diffstat (limited to 'fs/nfs/nfs4state.c')
 -rw-r--r--   fs/nfs/nfs4state.c   203
 1 files changed, 114 insertions, 89 deletions
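Editor's note: the bulk of this patch replaces the workqueue-driven state recovery (cl_recoverd plus a raw kernel_thread() reclaimer) with a kthread gated by a new NFS4CLNT_STATE_RECOVER bit, and threads an explicit rpc_cred through SETCLIENTID and RENEW. A condensed sketch of the new scheduling pattern, assembled from the hunks below (simplified for illustration: the two functions nfs4_schedule_state_recovery() and nfs4_recover_state() are merged here, the sketch relies on the NFS client definitions from this file, and the reclaim work itself is omitted):

    /* Condensed from nfs4_schedule_state_recovery()/nfs4_recover_state()
     * in the hunks below; merged into one function purely for illustration. */
    static int reclaimer(void *ptr);        /* does the actual reclaim work */

    void nfs4_schedule_state_recovery(struct nfs4_client *clp)
    {
            struct task_struct *task;

            if (clp == NULL)
                    return;
            /* The bit is the gate: only the caller that sets it starts a thread. */
            if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
                    return;
            /* Pin the module and the client for the lifetime of the thread. */
            __module_get(THIS_MODULE);
            atomic_inc(&clp->cl_count);
            task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
                            NIPQUAD(clp->cl_addr));
            if (IS_ERR(task)) {
                    /* Thread creation failed: undo the bit and both references. */
                    nfs4_clear_recover_bit(clp);
                    nfs4_put_client(clp);
                    module_put(THIS_MODULE);
            }
    }

The reclaimer thread itself clears the bit and drops the references via nfs4_clear_recover_bit(), nfs4_put_client() and module_put_and_exit() when it finishes, as the final hunks show.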
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0675f3215e0a..afad0255e7db 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -43,6 +43,8 @@
 #include <linux/smp_lock.h>
 #include <linux/nfs_fs.h>
 #include <linux/nfs_idmap.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 
@@ -57,8 +59,6 @@ const nfs4_stateid zero_stateid;
 static DEFINE_SPINLOCK(state_spinlock);
 static LIST_HEAD(nfs4_clientid_list);
 
-static void nfs4_recover_state(void *);
-
 void
 init_nfsv4_state(struct nfs_server *server)
 {
@@ -91,11 +91,10 @@ nfs4_alloc_client(struct in_addr *addr)
 
 	if (nfs_callback_up() < 0)
 		return NULL;
-	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
+	if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
 		nfs_callback_down();
 		return NULL;
 	}
-	memset(clp, 0, sizeof(*clp));
 	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
 	init_rwsem(&clp->cl_sem);
 	INIT_LIST_HEAD(&clp->cl_delegations);
@@ -103,14 +102,12 @@ nfs4_alloc_client(struct in_addr *addr)
 	INIT_LIST_HEAD(&clp->cl_unused);
 	spin_lock_init(&clp->cl_lock);
 	atomic_set(&clp->cl_count, 1);
-	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
 	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
 	INIT_LIST_HEAD(&clp->cl_superblocks);
-	init_waitqueue_head(&clp->cl_waitq);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
 	clp->cl_rpcclient = ERR_PTR(-EINVAL);
 	clp->cl_boot_time = CURRENT_TIME;
-	clp->cl_state = 1 << NFS4CLNT_OK;
+	clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
 	return clp;
 }
 
@@ -127,8 +124,6 @@ nfs4_free_client(struct nfs4_client *clp)
 		kfree(sp);
 	}
 	BUG_ON(!list_empty(&clp->cl_state_owners));
-	if (clp->cl_cred)
-		put_rpccred(clp->cl_cred);
 	nfs_idmap_delete(clp);
 	if (!IS_ERR(clp->cl_rpcclient))
 		rpc_shutdown_client(clp->cl_rpcclient);
@@ -193,27 +188,22 @@ nfs4_put_client(struct nfs4_client *clp)
 	list_del(&clp->cl_servers);
 	spin_unlock(&state_spinlock);
 	BUG_ON(!list_empty(&clp->cl_superblocks));
-	wake_up_all(&clp->cl_waitq);
 	rpc_wake_up(&clp->cl_rpcwaitq);
 	nfs4_kill_renewd(clp);
 	nfs4_free_client(clp);
 }
 
-static int __nfs4_init_client(struct nfs4_client *clp)
+static int nfs4_init_client(struct nfs4_client *clp, struct rpc_cred *cred)
 {
-	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
+	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
+			nfs_callback_tcpport, cred);
 	if (status == 0)
-		status = nfs4_proc_setclientid_confirm(clp);
+		status = nfs4_proc_setclientid_confirm(clp, cred);
 	if (status == 0)
 		nfs4_schedule_state_renewal(clp);
 	return status;
 }
 
-int nfs4_init_client(struct nfs4_client *clp)
-{
-	return nfs4_map_errors(__nfs4_init_client(clp));
-}
-
 u32
 nfs4_alloc_lockowner_id(struct nfs4_client *clp)
 {
@@ -235,6 +225,32 @@ nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
 	return sp;
 }
 
+struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
+{
+	struct nfs4_state_owner *sp;
+	struct rpc_cred *cred = NULL;
+
+	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+		if (list_empty(&sp->so_states))
+			continue;
+		cred = get_rpccred(sp->so_cred);
+		break;
+	}
+	return cred;
+}
+
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs4_client *clp)
+{
+	struct nfs4_state_owner *sp;
+
+	if (!list_empty(&clp->cl_state_owners)) {
+		sp = list_entry(clp->cl_state_owners.next,
+				struct nfs4_state_owner, so_list);
+		return get_rpccred(sp->so_cred);
+	}
+	return NULL;
+}
+
 static struct nfs4_state_owner *
 nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
 {
@@ -349,14 +365,9 @@ nfs4_alloc_open_state(void)
 {
 	struct nfs4_state *state;
 
-	state = kmalloc(sizeof(*state), GFP_KERNEL);
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
 	if (!state)
 		return NULL;
-	state->state = 0;
-	state->nreaders = 0;
-	state->nwriters = 0;
-	state->flags = 0;
-	memset(state->stateid.data, 0, sizeof(state->stateid.data));
 	atomic_set(&state->count, 1);
 	INIT_LIST_HEAD(&state->lock_states);
 	spin_lock_init(&state->state_lock);
@@ -475,15 +486,23 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
 	/* Protect against nfs4_find_state() */
 	spin_lock(&owner->so_lock);
 	spin_lock(&inode->i_lock);
-	if (mode & FMODE_READ)
-		state->nreaders--;
-	if (mode & FMODE_WRITE)
-		state->nwriters--;
+	switch (mode & (FMODE_READ | FMODE_WRITE)) {
+		case FMODE_READ:
+			state->n_rdonly--;
+			break;
+		case FMODE_WRITE:
+			state->n_wronly--;
+			break;
+		case FMODE_READ|FMODE_WRITE:
+			state->n_rdwr--;
+	}
 	oldstate = newstate = state->state;
-	if (state->nreaders == 0)
-		newstate &= ~FMODE_READ;
-	if (state->nwriters == 0)
-		newstate &= ~FMODE_WRITE;
+	if (state->n_rdwr == 0) {
+		if (state->n_rdonly == 0)
+			newstate &= ~FMODE_READ;
+		if (state->n_wronly == 0)
+			newstate &= ~FMODE_WRITE;
+	}
 	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
 		nfs4_state_set_mode_locked(state, newstate);
 		oldstate = newstate;
@@ -644,12 +663,15 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
 
 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
 {
+	struct rpc_sequence *sequence = counter->sequence;
 	struct nfs_seqid *new;
 
 	new = kmalloc(sizeof(*new), GFP_KERNEL);
 	if (new != NULL) {
 		new->sequence = counter;
-		INIT_LIST_HEAD(&new->list);
+		spin_lock(&sequence->lock);
+		list_add_tail(&new->list, &sequence->list);
+		spin_unlock(&sequence->lock);
 	}
 	return new;
 }
@@ -658,12 +680,10 @@ void nfs_free_seqid(struct nfs_seqid *seqid)
 {
 	struct rpc_sequence *sequence = seqid->sequence->sequence;
 
-	if (!list_empty(&seqid->list)) {
-		spin_lock(&sequence->lock);
-		list_del(&seqid->list);
-		spin_unlock(&sequence->lock);
-	}
-	rpc_wake_up_next(&sequence->wait);
+	spin_lock(&sequence->lock);
+	list_del(&seqid->list);
+	spin_unlock(&sequence->lock);
+	rpc_wake_up(&sequence->wait);
 	kfree(seqid);
 }
 
@@ -722,56 +742,53 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
 	if (sequence->list.next == &seqid->list)
 		goto out;
 	spin_lock(&sequence->lock);
-	if (!list_empty(&sequence->list)) {
+	if (sequence->list.next != &seqid->list) {
 		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
 		status = -EAGAIN;
-	} else
-		list_add(&seqid->list, &sequence->list);
+	}
 	spin_unlock(&sequence->lock);
 out:
 	return status;
 }
 
 static int reclaimer(void *);
-struct reclaimer_args {
-	struct nfs4_client *clp;
-	struct completion complete;
-};
+
+static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
+	rpc_wake_up(&clp->cl_rpcwaitq);
+}
 
 /*
  * State recovery routine
  */
-void
-nfs4_recover_state(void *data)
+static void nfs4_recover_state(struct nfs4_client *clp)
 {
-	struct nfs4_client *clp = (struct nfs4_client *)data;
-	struct reclaimer_args args = {
-		.clp = clp,
-	};
-	might_sleep();
-
-	init_completion(&args.complete);
+	struct task_struct *task;
 
-	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
-		goto out_failed_clear;
-	wait_for_completion(&args.complete);
-	return;
-out_failed_clear:
-	set_bit(NFS4CLNT_OK, &clp->cl_state);
-	wake_up_all(&clp->cl_waitq);
-	rpc_wake_up(&clp->cl_rpcwaitq);
+	__module_get(THIS_MODULE);
+	atomic_inc(&clp->cl_count);
+	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
+			NIPQUAD(clp->cl_addr));
+	if (!IS_ERR(task))
+		return;
+	nfs4_clear_recover_bit(clp);
+	nfs4_put_client(clp);
+	module_put(THIS_MODULE);
 }
 
 /*
  * Schedule a state recovery attempt
  */
-void
-nfs4_schedule_state_recovery(struct nfs4_client *clp)
+void nfs4_schedule_state_recovery(struct nfs4_client *clp)
 {
 	if (!clp)
 		return;
-	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
-		schedule_work(&clp->cl_recoverd);
+	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
+		nfs4_recover_state(clp);
 }
 
 static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
@@ -887,18 +904,14 @@ static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
 
 static int reclaimer(void *ptr)
 {
-	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
-	struct nfs4_client *clp = args->clp;
+	struct nfs4_client *clp = ptr;
 	struct nfs4_state_owner *sp;
 	struct nfs4_state_recovery_ops *ops;
+	struct rpc_cred *cred;
 	int status = 0;
 
-	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
 	allow_signal(SIGKILL);
 
-	atomic_inc(&clp->cl_count);
-	complete(&args->complete);
-
 	/* Ensure exclusive access to NFSv4 state */
 	lock_kernel();
 	down_write(&clp->cl_sem);
@@ -906,20 +919,33 @@ static int reclaimer(void *ptr)
 	if (list_empty(&clp->cl_superblocks))
 		goto out;
 restart_loop:
-	status = nfs4_proc_renew(clp);
-	switch (status) {
-		case 0:
-		case -NFS4ERR_CB_PATH_DOWN:
-			goto out;
-		case -NFS4ERR_STALE_CLIENTID:
-		case -NFS4ERR_LEASE_MOVED:
-			ops = &nfs4_reboot_recovery_ops;
-			break;
-		default:
-			ops = &nfs4_network_partition_recovery_ops;
-	};
+	ops = &nfs4_network_partition_recovery_ops;
+	/* Are there any open files on this volume? */
+	cred = nfs4_get_renew_cred(clp);
+	if (cred != NULL) {
+		/* Yes there are: try to renew the old lease */
+		status = nfs4_proc_renew(clp, cred);
+		switch (status) {
+			case 0:
+			case -NFS4ERR_CB_PATH_DOWN:
+				put_rpccred(cred);
+				goto out;
+			case -NFS4ERR_STALE_CLIENTID:
+			case -NFS4ERR_LEASE_MOVED:
+				ops = &nfs4_reboot_recovery_ops;
+		}
+	} else {
+		/* "reboot" to ensure we clear all state on the server */
+		clp->cl_boot_time = CURRENT_TIME;
+		cred = nfs4_get_setclientid_cred(clp);
+	}
+	/* We're going to have to re-establish a clientid */
 	nfs4_state_mark_reclaim(clp);
-	status = __nfs4_init_client(clp);
+	status = -ENOENT;
+	if (cred != NULL) {
+		status = nfs4_init_client(clp, cred);
+		put_rpccred(cred);
+	}
 	if (status)
 		goto out_error;
 	/* Mark all delegations for reclaim */
@@ -940,14 +966,13 @@ restart_loop:
 	}
 	nfs_delegation_reap_unclaimed(clp);
 out:
-	set_bit(NFS4CLNT_OK, &clp->cl_state);
 	up_write(&clp->cl_sem);
 	unlock_kernel();
-	wake_up_all(&clp->cl_waitq);
-	rpc_wake_up(&clp->cl_rpcwaitq);
 	if (status == -NFS4ERR_CB_PATH_DOWN)
 		nfs_handle_cb_pathdown(clp);
+	nfs4_clear_recover_bit(clp);
 	nfs4_put_client(clp);
+	module_put_and_exit(0);
 	return 0;
 out_error:
 	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
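Editor's note on the nfs4_close_state() hunk above: the single nreaders/nwriters pair becomes three per-mode counters. Read in isolation, the new accounting amounts to the following sketch (illustration only; nfs4_calc_new_mode() is a made-up helper name, and the owner/inode locking and the delegated-state path of the real function are omitted):

    /* Illustrative helper (not in the patch): per-mode open counting.
     * n_rdonly / n_wronly / n_rdwr count opens of each kind; the share mode
     * only drops FMODE_READ or FMODE_WRITE once no opener still needs it. */
    static mode_t nfs4_calc_new_mode(struct nfs4_state *state, mode_t closing)
    {
            mode_t newstate = state->state;

            switch (closing & (FMODE_READ | FMODE_WRITE)) {
                    case FMODE_READ:
                            state->n_rdonly--;
                            break;
                    case FMODE_WRITE:
                            state->n_wronly--;
                            break;
                    case FMODE_READ | FMODE_WRITE:
                            state->n_rdwr--;
            }
            /* A read/write open keeps both modes alive on its own. */
            if (state->n_rdwr == 0) {
                    if (state->n_rdonly == 0)
                            newstate &= ~FMODE_READ;
                    if (state->n_wronly == 0)
                            newstate &= ~FMODE_WRITE;
            }
            return newstate;
    }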