author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-23 11:53:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-23 11:53:47 -0400
commit    f63d395d47f37a4fe771e6d4b1db9d2cdae5ffc5 (patch)
tree      3448a14ae965802adb963762cadeb9989ce4caa2 /fs/nfs/nfs4state.c
parent    643ac9fc5429e85b8b7f534544b80bcc4f34c367 (diff)
parent    5a7c9eec9fde1da0e3adf0a4ddb64ff2a324a492 (diff)
Merge tag 'nfs-for-3.4-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client updates for Linux 3.4 from Trond Myklebust:
 "New features include:

   - Add NFS client support for containers.

     This should enable most of the necessary functionality, including
     lockd support, and support for rpc.statd, NFSv4 idmapper and
     RPCSEC_GSS upcalls into the correct network namespace from which
     the mount system call was issued.

   - NFSv4 idmapper scalability improvements

     Base the idmapper cache on the keyring interface to allow
     concurrent access to idmapper entries.  Start the process of
     migrating users from the single-threaded daemon-based approach to
     the multi-threaded request-key based approach.

   - NFSv4.1 implementation id.

     Allows the NFSv4.1 client and server to mutually identify each
     other for logging and debugging purposes.

   - Support the 'vers=4.1' mount option for mounting NFSv4.1 instead
     of having to use the more counterintuitive 'vers=4,minorversion=1'.

   - SUNRPC tracepoints.

     Start the process of adding tracepoints in order to improve
     debugging of the RPC layer.

   - pNFS object layout support for autologin.

  Important bugfixes include:

   - Fix a bug in rpc_wake_up/rpc_wake_up_status that caused them to
     fail to wake up all tasks when applied to priority waitqueues.

   - Ensure that we handle read delegations correctly, when we try to
     truncate a file.

   - A number of fixes for NFSv4 state manager loops (mostly to do
     with delegation recovery)."

* tag 'nfs-for-3.4-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (224 commits)
  NFS: fix sb->s_id in nfs debug prints
  xprtrdma: Remove assumption that each segment is <= PAGE_SIZE
  xprtrdma: The transport should not bug-check when a dup reply is received
  pnfs-obj: autologin: Add support for protocol autologin
  NFS: Remove nfs4_setup_sequence from generic rename code
  NFS: Remove nfs4_setup_sequence from generic unlink code
  NFS: Remove nfs4_setup_sequence from generic read code
  NFS: Remove nfs4_setup_sequence from generic write code
  NFS: Fix more NFS debug related build warnings
  SUNRPC/LOCKD: Fix build warnings when CONFIG_SUNRPC_DEBUG is undefined
  nfs: non void functions must return a value
  SUNRPC: Kill compiler warning when RPC_DEBUG is unset
  SUNRPC/NFS: Add Kbuild dependencies for NFS_DEBUG/RPC_DEBUG
  NFS: Use cond_resched_lock() to reduce latencies in the commit scans
  NFSv4: It is not safe to dereference lsp->ls_state in release_lockowner
  NFS: ncommit count is being double decremented
  SUNRPC: We must not use list_for_each_entry_safe() in rpc_wake_up()
  Try using machine credentials for RENEW calls
  NFSv4.1: Fix a few issues in filelayout_commit_pagelist
  NFSv4.1: Clean ups and bugfixes for the pNFS read/writeback/commit code
  ...
Diffstat (limited to 'fs/nfs/nfs4state.c')
-rw-r--r--  fs/nfs/nfs4state.c  355
1 file changed, 200 insertions(+), 155 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 45392032e7b..0f43414eb25 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -146,6 +146,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
 	struct rpc_cred *cred = NULL;
 	struct nfs_server *server;
 
+	/* Use machine credentials if available */
+	cred = nfs4_get_machine_cred_locked(clp);
+	if (cred != NULL)
+		goto out;
+
 	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		cred = nfs4_get_renew_cred_server_locked(server);
@@ -153,6 +158,8 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
 			break;
 	}
 	rcu_read_unlock();
+
+out:
 	return cred;
 }
 
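Note: the two hunks above carry "Try using machine credentials for RENEW calls" from the shortlog above. Lease renewal now prefers the client's machine credential, and the scan over each mounted server's state owners survives only as a fallback for setups that have no machine credential.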
@@ -190,30 +197,29 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
+	struct nfs4_slot_table *tbl;
 	int max_slots;
 
 	if (ses == NULL)
 		return;
+	tbl = &ses->fc_slot_table;
 	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
-		max_slots = ses->fc_slot_table.max_slots;
+		spin_lock(&tbl->slot_tbl_lock);
+		max_slots = tbl->max_slots;
 		while (max_slots--) {
-			struct rpc_task *task;
-
-			task = rpc_wake_up_next(&ses->fc_slot_table.
-						slot_tbl_waitq);
-			if (!task)
+			if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+					nfs4_set_task_privileged,
+					NULL) == NULL)
 				break;
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
 		}
-		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+		spin_unlock(&tbl->slot_tbl_lock);
 	}
 }
 
 static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
 {
 	spin_lock(&tbl->slot_tbl_lock);
-	if (tbl->highest_used_slotid != -1) {
+	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
 		INIT_COMPLETION(tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
 		return wait_for_completion_interruptible(&tbl->complete);
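rpc_wake_up_first() is new SUNRPC API from this same pull: it wakes the first task on the queue for which the supplied callback returns true, letting the caller act on the task before it runs. The nfs4_set_task_privileged() helper named above is defined in nfs4proc.c by this series; judging from the wake-then-set-priority sequence it replaces, its shape is essentially:

    /* Sketch: the action rpc_wake_up_first() applies to the candidate task;
     * returning true marks the task privileged and lets the wakeup proceed. */
    static bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
    {
            rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
            return true;
    }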
@@ -317,62 +323,6 @@ out:
 	return cred;
 }
 
-static void nfs_alloc_unique_id_locked(struct rb_root *root,
-				       struct nfs_unique_id *new,
-				       __u64 minval, int maxbits)
-{
-	struct rb_node **p, *parent;
-	struct nfs_unique_id *pos;
-	__u64 mask = ~0ULL;
-
-	if (maxbits < 64)
-		mask = (1ULL << maxbits) - 1ULL;
-
-	/* Ensure distribution is more or less flat */
-	get_random_bytes(&new->id, sizeof(new->id));
-	new->id &= mask;
-	if (new->id < minval)
-		new->id += minval;
-retry:
-	p = &root->rb_node;
-	parent = NULL;
-
-	while (*p != NULL) {
-		parent = *p;
-		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
-
-		if (new->id < pos->id)
-			p = &(*p)->rb_left;
-		else if (new->id > pos->id)
-			p = &(*p)->rb_right;
-		else
-			goto id_exists;
-	}
-	rb_link_node(&new->rb_node, parent, p);
-	rb_insert_color(&new->rb_node, root);
-	return;
-id_exists:
-	for (;;) {
-		new->id++;
-		if (new->id < minval || (new->id & mask) != new->id) {
-			new->id = minval;
-			break;
-		}
-		parent = rb_next(parent);
-		if (parent == NULL)
-			break;
-		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
-		if (new->id < pos->id)
-			break;
-	}
-	goto retry;
-}
-
-static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
-{
-	rb_erase(&id->rb_node, root);
-}
-
 static struct nfs4_state_owner *
 nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
@@ -405,6 +355,7 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp;
+	int err;
 
 	while (*p != NULL) {
 		parent = *p;
@@ -421,8 +372,9 @@ nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 			return sp;
 		}
 	}
-	nfs_alloc_unique_id_locked(&server->openowner_id,
-				   &new->so_owner_id, 1, 64);
+	err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
+	if (err)
+		return ERR_PTR(err);
 	rb_link_node(&new->so_server_node, parent, p);
 	rb_insert_color(&new->so_server_node, &server->state_owners);
 	return new;
@@ -435,7 +387,23 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 
 	if (!RB_EMPTY_NODE(&sp->so_server_node))
 		rb_erase(&sp->so_server_node, &server->state_owners);
-	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
+	ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
+}
+
+static void
+nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
+{
+	sc->flags = 0;
+	sc->counter = 0;
+	spin_lock_init(&sc->lock);
+	INIT_LIST_HEAD(&sc->list);
+	rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
+}
+
+static void
+nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
+{
+	rpc_destroy_wait_queue(&sc->wait);
 }
 
 /*
@@ -444,19 +412,20 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
  *
  */
 static struct nfs4_state_owner *
-nfs4_alloc_state_owner(void)
+nfs4_alloc_state_owner(struct nfs_server *server,
+		struct rpc_cred *cred,
+		gfp_t gfp_flags)
 {
 	struct nfs4_state_owner *sp;
 
-	sp = kzalloc(sizeof(*sp),GFP_NOFS);
+	sp = kzalloc(sizeof(*sp), gfp_flags);
 	if (!sp)
 		return NULL;
+	sp->so_server = server;
+	sp->so_cred = get_rpccred(cred);
 	spin_lock_init(&sp->so_lock);
 	INIT_LIST_HEAD(&sp->so_states);
-	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
-	sp->so_seqid.sequence = &sp->so_sequence;
-	spin_lock_init(&sp->so_sequence.lock);
-	INIT_LIST_HEAD(&sp->so_sequence.list);
+	nfs4_init_seqid_counter(&sp->so_seqid);
 	atomic_set(&sp->so_count, 1);
 	INIT_LIST_HEAD(&sp->so_lru);
 	return sp;
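The nfs4_init_seqid_counter()/nfs4_destroy_seqid_counter() helpers used above exist because the companion header change (in nfs4_fs.h, outside this file's diff) folds the old struct rpc_sequence bookkeeping directly into struct nfs_seqid_counter. Reconstructed from the fields the init helper touches, the consolidated counter looks roughly like this (field order is a guess):

    /* Sketch of the consolidated counter; owner_id is filled in separately
     * by the IDA allocations elsewhere in this patch. */
    struct nfs_seqid_counter {
            int owner_id;                 /* from the openowner/lockowner IDA */
            int flags;
            u32 counter;
            spinlock_t lock;              /* protects the FIFO below */
            struct list_head list;        /* pending struct nfs_seqid, FIFO */
            struct rpc_wait_queue wait;   /* tasks waiting for the head slot */
    };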
@@ -478,7 +447,7 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 
 static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
 {
-	rpc_destroy_wait_queue(&sp->so_sequence.wait);
+	nfs4_destroy_seqid_counter(&sp->so_seqid);
 	put_rpccred(sp->so_cred);
 	kfree(sp);
 }
@@ -516,7 +485,8 @@ static void nfs4_gc_state_owners(struct nfs_server *server)
  * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
  */
 struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
-					      struct rpc_cred *cred)
+					      struct rpc_cred *cred,
+					      gfp_t gfp_flags)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
@@ -526,20 +496,18 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
 	spin_unlock(&clp->cl_lock);
 	if (sp != NULL)
 		goto out;
-	new = nfs4_alloc_state_owner();
+	new = nfs4_alloc_state_owner(server, cred, gfp_flags);
 	if (new == NULL)
 		goto out;
-	new->so_server = server;
-	new->so_cred = cred;
-	spin_lock(&clp->cl_lock);
-	sp = nfs4_insert_state_owner_locked(new);
-	spin_unlock(&clp->cl_lock);
-	if (sp == new)
-		get_rpccred(cred);
-	else {
-		rpc_destroy_wait_queue(&new->so_sequence.wait);
-		kfree(new);
-	}
+	do {
+		if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
+			break;
+		spin_lock(&clp->cl_lock);
+		sp = nfs4_insert_state_owner_locked(new);
+		spin_unlock(&clp->cl_lock);
+	} while (sp == ERR_PTR(-EAGAIN));
+	if (sp != new)
+		nfs4_free_state_owner(new);
 out:
 	nfs4_gc_state_owners(server);
 	return sp;
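The do/while above, together with ida_get_new() in nfs4_insert_state_owner_locked(), is the canonical two-step IDA idiom of this era (since superseded by ida_alloc()/ida_free()): preload memory while sleeping is still legal, allocate under the spinlock, and retry if a racing allocation consumed the preload. A self-contained sketch with hypothetical names:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDA(example_ida);
    static DEFINE_SPINLOCK(example_lock);

    static int example_alloc_id(gfp_t gfp, int *id)
    {
            int err;

            do {
                    if (!ida_pre_get(&example_ida, gfp))
                            return -ENOMEM;              /* preload failed */
                    spin_lock(&example_lock);
                    err = ida_get_new(&example_ida, id); /* lowest free ID */
                    spin_unlock(&example_lock);
            } while (err == -EAGAIN);  /* a racer consumed the preload */
            return err;
    }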
@@ -795,15 +763,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 {
 	struct nfs4_lock_state *lsp;
 	struct nfs_server *server = state->owner->so_server;
-	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
 		return NULL;
-	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
-	spin_lock_init(&lsp->ls_sequence.lock);
-	INIT_LIST_HEAD(&lsp->ls_sequence.list);
-	lsp->ls_seqid.sequence = &lsp->ls_sequence;
+	nfs4_init_seqid_counter(&lsp->ls_seqid);
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_state = state;
 	lsp->ls_owner.lo_type = type;
@@ -815,25 +779,22 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 		lsp->ls_owner.lo_u.posix_owner = fl_owner;
 		break;
 	default:
-		kfree(lsp);
-		return NULL;
+		goto out_free;
 	}
-	spin_lock(&clp->cl_lock);
-	nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
-	spin_unlock(&clp->cl_lock);
+	lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
+	if (lsp->ls_seqid.owner_id < 0)
+		goto out_free;
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
+out_free:
+	kfree(lsp);
+	return NULL;
 }
 
-static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
 {
-	struct nfs_server *server = lsp->ls_state->owner->so_server;
-	struct nfs_client *clp = server->nfs_client;
-
-	spin_lock(&clp->cl_lock);
-	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
-	spin_unlock(&clp->cl_lock);
-	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
+	ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
+	nfs4_destroy_seqid_counter(&lsp->ls_seqid);
 	kfree(lsp);
 }
 
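Lock owners take the one-step wrappers instead: ida_simple_get() bundles the preload/allocate/retry dance into a single call that returns the new ID or a negative errno (end == 0 means "no upper bound"), and ida_simple_remove() releases it, so the cl_lock round trip disappears entirely. A minimal sketch, again with hypothetical names:

    #include <linux/idr.h>

    static DEFINE_IDA(example_lockowner_ida);

    static int example_lockowner_id_alloc(void)
    {
            /* start = 0, end = 0 (no upper bound); returns ID or -errno */
            return ida_simple_get(&example_lockowner_ida, 0, 0, GFP_NOFS);
    }

    static void example_lockowner_id_free(int id)
    {
            ida_simple_remove(&example_lockowner_ida, id);
    }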
@@ -865,7 +826,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
 	}
 	spin_unlock(&state->state_lock);
 	if (new != NULL)
-		nfs4_free_lock_state(new);
+		nfs4_free_lock_state(state->owner->so_server, new);
 	return lsp;
 }
 
@@ -886,9 +847,11 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 	if (list_empty(&state->lock_states))
 		clear_bit(LK_STATE_IN_USE, &state->flags);
 	spin_unlock(&state->state_lock);
-	if (lsp->ls_flags & NFS_LOCK_INITIALIZED)
-		nfs4_release_lockowner(lsp);
-	nfs4_free_lock_state(lsp);
+	if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
+		if (nfs4_release_lockowner(lsp) == 0)
+			return;
+	}
+	nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
 }
 
 static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
@@ -918,7 +881,8 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 	if (fl->fl_flags & FL_POSIX)
 		lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
 	else if (fl->fl_flags & FL_FLOCK)
-		lsp = nfs4_get_lock_state(state, 0, fl->fl_pid, NFS4_FLOCK_LOCK_TYPE);
+		lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid,
+				NFS4_FLOCK_LOCK_TYPE);
 	else
 		return -EINVAL;
 	if (lsp == NULL)
@@ -928,28 +892,49 @@ int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 	return 0;
 }
 
-/*
- * Byte-range lock aware utility to initialize the stateid of read/write
- * requests.
- */
-void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid)
+static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+		fl_owner_t fl_owner, pid_t fl_pid)
 {
 	struct nfs4_lock_state *lsp;
-	int seq;
+	bool ret = false;
 
-	do {
-		seq = read_seqbegin(&state->seqlock);
-		memcpy(dst, &state->stateid, sizeof(*dst));
-	} while (read_seqretry(&state->seqlock, seq));
 	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
-		return;
+		goto out;
 
 	spin_lock(&state->state_lock);
 	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
-	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
-		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
+	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
+		nfs4_stateid_copy(dst, &lsp->ls_stateid);
+		ret = true;
+	}
 	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
+out:
+	return ret;
+}
+
+static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+{
+	int seq;
+
+	do {
+		seq = read_seqbegin(&state->seqlock);
+		nfs4_stateid_copy(dst, &state->stateid);
+	} while (read_seqretry(&state->seqlock, seq));
+}
+
+/*
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
+ */
+void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+		fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid)
+{
+	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
+		return;
+	if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid))
+		return;
+	nfs4_copy_open_stateid(dst, state);
 }
 
 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
@@ -960,20 +945,28 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m
 	if (new != NULL) {
 		new->sequence = counter;
 		INIT_LIST_HEAD(&new->list);
+		new->task = NULL;
 	}
 	return new;
 }
 
 void nfs_release_seqid(struct nfs_seqid *seqid)
 {
-	if (!list_empty(&seqid->list)) {
-		struct rpc_sequence *sequence = seqid->sequence->sequence;
+	struct nfs_seqid_counter *sequence;
 
-		spin_lock(&sequence->lock);
-		list_del_init(&seqid->list);
-		spin_unlock(&sequence->lock);
-		rpc_wake_up(&sequence->wait);
+	if (list_empty(&seqid->list))
+		return;
+	sequence = seqid->sequence;
+	spin_lock(&sequence->lock);
+	list_del_init(&seqid->list);
+	if (!list_empty(&sequence->list)) {
+		struct nfs_seqid *next;
+
+		next = list_first_entry(&sequence->list,
+				struct nfs_seqid, list);
+		rpc_wake_up_queued_task(&sequence->wait, next->task);
 	}
+	spin_unlock(&sequence->lock);
 }
 
 void nfs_free_seqid(struct nfs_seqid *seqid)
@@ -989,14 +982,14 @@ void nfs_free_seqid(struct nfs_seqid *seqid)
  */
 static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
 {
-	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
+	BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid);
 	switch (status) {
 		case 0:
 			break;
 		case -NFS4ERR_BAD_SEQID:
 			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
 				return;
-			printk(KERN_WARNING "NFS: v4 server returned a bad"
+			pr_warn_ratelimited("NFS: v4 server returned a bad"
 					" sequence-id error on an"
 					" unconfirmed sequence %p!\n",
 					seqid->sequence);
@@ -1040,10 +1033,11 @@ void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
 
 int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
 {
-	struct rpc_sequence *sequence = seqid->sequence->sequence;
+	struct nfs_seqid_counter *sequence = seqid->sequence;
 	int status = 0;
 
 	spin_lock(&sequence->lock);
+	seqid->task = task;
 	if (list_empty(&seqid->list))
 		list_add_tail(&seqid->list, &sequence->list);
 	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
@@ -1072,19 +1066,28 @@ static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
 void nfs4_schedule_state_manager(struct nfs_client *clp)
 {
 	struct task_struct *task;
+	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
 
 	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
 		return;
 	__module_get(THIS_MODULE);
 	atomic_inc(&clp->cl_count);
-	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
-				rpc_peeraddr2str(clp->cl_rpcclient,
-							RPC_DISPLAY_ADDR));
-	if (!IS_ERR(task))
-		return;
-	nfs4_clear_state_manager_bit(clp);
-	nfs_put_client(clp);
-	module_put(THIS_MODULE);
+
+	/* The rcu_read_lock() is not strictly necessary, as the state
+	 * manager is the only thread that ever changes the rpc_xprt
+	 * after it's initialized.  At this point, we're single threaded. */
+	rcu_read_lock();
+	snprintf(buf, sizeof(buf), "%s-manager",
+			rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+	rcu_read_unlock();
+	task = kthread_run(nfs4_run_state_manager, clp, buf);
+	if (IS_ERR(task)) {
+		printk(KERN_ERR "%s: kthread_run: %ld\n",
+			__func__, PTR_ERR(task));
+		nfs4_clear_state_manager_bit(clp);
+		nfs_put_client(clp);
+		module_put(THIS_MODULE);
+	}
 }
 
 /*
@@ -1098,10 +1101,25 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
 	set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
 	nfs4_schedule_state_manager(clp);
 }
+EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
+
+/*
+ * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
+ * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
+ * resend of the SETCLIENTID and hence re-establish the
+ * callback channel. Then return all existing delegations.
+ */
+static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
+{
+	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+	nfs_expire_all_delegations(clp);
+}
 
 void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
 {
-	nfs_handle_cb_pathdown(clp);
+	nfs40_handle_cb_pathdown(clp);
 	nfs4_schedule_state_manager(clp);
 }
 
@@ -1132,11 +1150,37 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
 {
 	struct nfs_client *clp = server->nfs_client;
 
-	if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags))
-		nfs_async_inode_return_delegation(state->inode, &state->stateid);
 	nfs4_state_mark_reclaim_nograce(clp, state);
 	nfs4_schedule_state_manager(clp);
 }
+EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
+
+void nfs_inode_find_state_and_recover(struct inode *inode,
+		const nfs4_stateid *stateid)
+{
+	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_open_context *ctx;
+	struct nfs4_state *state;
+	bool found = false;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(ctx, &nfsi->open_files, list) {
+		state = ctx->state;
+		if (state == NULL)
+			continue;
+		if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+			continue;
+		if (!nfs4_stateid_match(&state->stateid, stateid))
+			continue;
+		nfs4_state_mark_reclaim_nograce(clp, state);
+		found = true;
+	}
+	spin_unlock(&inode->i_lock);
+	if (found)
+		nfs4_schedule_state_manager(clp);
+}
+
 
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
@@ -1175,8 +1219,8 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
 		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
 			goto out;
 		default:
-			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
-					__func__, status);
+			printk(KERN_ERR "NFS: %s: unhandled error %d. "
+					"Zeroing state\n", __func__, status);
 		case -ENOMEM:
 		case -NFS4ERR_DENIED:
 		case -NFS4ERR_RECLAIM_BAD:
@@ -1222,8 +1266,9 @@ restart:
 		spin_lock(&state->state_lock);
 		list_for_each_entry(lock, &state->lock_states, ls_locks) {
 			if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
-				printk("%s: Lock reclaim failed!\n",
-						__func__);
+				pr_warn_ratelimited("NFS: "
+						"%s: Lock reclaim "
+						"failed!\n", __func__);
 		}
 		spin_unlock(&state->state_lock);
 		nfs4_put_open_state(state);
@@ -1232,8 +1277,8 @@ restart:
 		}
 		switch (status) {
 			default:
-				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
-						__func__, status);
+				printk(KERN_ERR "NFS: %s: unhandled error %d. "
+						"Zeroing state\n", __func__, status);
 			case -ENOENT:
 			case -ENOMEM:
 			case -ESTALE:
@@ -1241,8 +1286,8 @@ restart:
 				 * Open state on this file cannot be recovered
 				 * All we can do is revert to using the zero stateid.
 				 */
-				memset(state->stateid.data, 0,
-						sizeof(state->stateid.data));
+				memset(&state->stateid, 0,
+						sizeof(state->stateid));
 				/* Mark the file as being 'closed' */
 				state->state = 0;
 				break;
@@ -1420,7 +1465,7 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 	case 0:
 		break;
 	case -NFS4ERR_CB_PATH_DOWN:
-		nfs_handle_cb_pathdown(clp);
+		nfs40_handle_cb_pathdown(clp);
 		break;
 	case -NFS4ERR_NO_GRACE:
 		nfs4_state_end_reclaim_reboot(clp);
@@ -1801,7 +1846,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
 	} while (atomic_read(&clp->cl_count) > 1);
 	return;
 out_error:
-	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
+	pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s"
 			" with error %d\n", clp->cl_hostname, -status);
 	nfs4_end_drain_session(clp);
 	nfs4_clear_state_manager_bit(clp);