Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--  fs/nfsd/nfs4state.c  3096
1 file changed, 2120 insertions, 976 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 2204e1fe5725..2e80a59e7e91 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -70,13 +70,11 @@ static u64 current_sessionid = 1;
70#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) 70#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
71 71
72/* forward declarations */ 72/* forward declarations */
73static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner); 73static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
74static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
74 75
75/* Locking: */ 76/* Locking: */
76 77
77/* Currently used for almost all code touching nfsv4 state: */
78static DEFINE_MUTEX(client_mutex);
79
80/* 78/*
81 * Currently used for the del_recall_lru and file hash table. In an 79 * Currently used for the del_recall_lru and file hash table. In an
82 * effort to decrease the scope of the client_mutex, this spinlock may 80 * effort to decrease the scope of the client_mutex, this spinlock may
@@ -84,18 +82,18 @@ static DEFINE_MUTEX(client_mutex);
84 */ 82 */
85static DEFINE_SPINLOCK(state_lock); 83static DEFINE_SPINLOCK(state_lock);
86 84
85/*
86 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
87 * the refcount on the open stateid to drop.
88 */
89static DECLARE_WAIT_QUEUE_HEAD(close_wq);
90
87static struct kmem_cache *openowner_slab; 91static struct kmem_cache *openowner_slab;
88static struct kmem_cache *lockowner_slab; 92static struct kmem_cache *lockowner_slab;
89static struct kmem_cache *file_slab; 93static struct kmem_cache *file_slab;
90static struct kmem_cache *stateid_slab; 94static struct kmem_cache *stateid_slab;
91static struct kmem_cache *deleg_slab; 95static struct kmem_cache *deleg_slab;
92 96
93void
94nfs4_lock_state(void)
95{
96 mutex_lock(&client_mutex);
97}
98
99static void free_session(struct nfsd4_session *); 97static void free_session(struct nfsd4_session *);
100 98
101static bool is_session_dead(struct nfsd4_session *ses) 99static bool is_session_dead(struct nfsd4_session *ses)
@@ -103,12 +101,6 @@ static bool is_session_dead(struct nfsd4_session *ses)
103 return ses->se_flags & NFS4_SESSION_DEAD; 101 return ses->se_flags & NFS4_SESSION_DEAD;
104} 102}
105 103
106void nfsd4_put_session(struct nfsd4_session *ses)
107{
108 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
109 free_session(ses);
110}
111
112static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) 104static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
113{ 105{
114 if (atomic_read(&ses->se_ref) > ref_held_by_me) 106 if (atomic_read(&ses->se_ref) > ref_held_by_me)
@@ -117,46 +109,17 @@ static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_b
117 return nfs_ok; 109 return nfs_ok;
118} 110}
119 111
120static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
121{
122 if (is_session_dead(ses))
123 return nfserr_badsession;
124 atomic_inc(&ses->se_ref);
125 return nfs_ok;
126}
127
128void
129nfs4_unlock_state(void)
130{
131 mutex_unlock(&client_mutex);
132}
133
134static bool is_client_expired(struct nfs4_client *clp) 112static bool is_client_expired(struct nfs4_client *clp)
135{ 113{
136 return clp->cl_time == 0; 114 return clp->cl_time == 0;
137} 115}
138 116
139static __be32 mark_client_expired_locked(struct nfs4_client *clp) 117static __be32 get_client_locked(struct nfs4_client *clp)
140{
141 if (atomic_read(&clp->cl_refcount))
142 return nfserr_jukebox;
143 clp->cl_time = 0;
144 return nfs_ok;
145}
146
147static __be32 mark_client_expired(struct nfs4_client *clp)
148{ 118{
149 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 119 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
150 __be32 ret;
151 120
152 spin_lock(&nn->client_lock); 121 lockdep_assert_held(&nn->client_lock);
153 ret = mark_client_expired_locked(clp);
154 spin_unlock(&nn->client_lock);
155 return ret;
156}
157 122
158static __be32 get_client_locked(struct nfs4_client *clp)
159{
160 if (is_client_expired(clp)) 123 if (is_client_expired(clp))
161 return nfserr_expired; 124 return nfserr_expired;
162 atomic_inc(&clp->cl_refcount); 125 atomic_inc(&clp->cl_refcount);
@@ -197,13 +160,17 @@ renew_client(struct nfs4_client *clp)
197 160
198static void put_client_renew_locked(struct nfs4_client *clp) 161static void put_client_renew_locked(struct nfs4_client *clp)
199{ 162{
163 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
164
165 lockdep_assert_held(&nn->client_lock);
166
200 if (!atomic_dec_and_test(&clp->cl_refcount)) 167 if (!atomic_dec_and_test(&clp->cl_refcount))
201 return; 168 return;
202 if (!is_client_expired(clp)) 169 if (!is_client_expired(clp))
203 renew_client_locked(clp); 170 renew_client_locked(clp);
204} 171}
205 172
206void put_client_renew(struct nfs4_client *clp) 173static void put_client_renew(struct nfs4_client *clp)
207{ 174{
208 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 175 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
209 176
@@ -214,6 +181,79 @@ void put_client_renew(struct nfs4_client *clp)
214 spin_unlock(&nn->client_lock); 181 spin_unlock(&nn->client_lock);
215} 182}
216 183
184static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
185{
186 __be32 status;
187
188 if (is_session_dead(ses))
189 return nfserr_badsession;
190 status = get_client_locked(ses->se_client);
191 if (status)
192 return status;
193 atomic_inc(&ses->se_ref);
194 return nfs_ok;
195}
196
197static void nfsd4_put_session_locked(struct nfsd4_session *ses)
198{
199 struct nfs4_client *clp = ses->se_client;
200 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
201
202 lockdep_assert_held(&nn->client_lock);
203
204 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
205 free_session(ses);
206 put_client_renew_locked(clp);
207}
208
209static void nfsd4_put_session(struct nfsd4_session *ses)
210{
211 struct nfs4_client *clp = ses->se_client;
212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
213
214 spin_lock(&nn->client_lock);
215 nfsd4_put_session_locked(ses);
216 spin_unlock(&nn->client_lock);
217}
218
219static int
220same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
221{
222 return (sop->so_owner.len == owner->len) &&
223 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
224}
225
226static struct nfs4_openowner *
227find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
228 struct nfs4_client *clp)
229{
230 struct nfs4_stateowner *so;
231
232 lockdep_assert_held(&clp->cl_lock);
233
234 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
235 so_strhash) {
236 if (!so->so_is_open_owner)
237 continue;
238 if (same_owner_str(so, &open->op_owner)) {
239 atomic_inc(&so->so_count);
240 return openowner(so);
241 }
242 }
243 return NULL;
244}
245
246static struct nfs4_openowner *
247find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
248 struct nfs4_client *clp)
249{
250 struct nfs4_openowner *oo;
251
252 spin_lock(&clp->cl_lock);
253 oo = find_openstateowner_str_locked(hashval, open, clp);
254 spin_unlock(&clp->cl_lock);
255 return oo;
256}
217 257
218static inline u32 258static inline u32
219opaque_hashval(const void *ptr, int nbytes) 259opaque_hashval(const void *ptr, int nbytes)
@@ -236,10 +276,11 @@ static void nfsd4_free_file(struct nfs4_file *f)
236static inline void 276static inline void
237put_nfs4_file(struct nfs4_file *fi) 277put_nfs4_file(struct nfs4_file *fi)
238{ 278{
279 might_lock(&state_lock);
280
239 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { 281 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
240 hlist_del(&fi->fi_hash); 282 hlist_del(&fi->fi_hash);
241 spin_unlock(&state_lock); 283 spin_unlock(&state_lock);
242 iput(fi->fi_inode);
243 nfsd4_free_file(fi); 284 nfsd4_free_file(fi);
244 } 285 }
245} 286}
@@ -250,7 +291,80 @@ get_nfs4_file(struct nfs4_file *fi)
250 atomic_inc(&fi->fi_ref); 291 atomic_inc(&fi->fi_ref);
251} 292}
252 293
253static int num_delegations; 294static struct file *
295__nfs4_get_fd(struct nfs4_file *f, int oflag)
296{
297 if (f->fi_fds[oflag])
298 return get_file(f->fi_fds[oflag]);
299 return NULL;
300}
301
302static struct file *
303find_writeable_file_locked(struct nfs4_file *f)
304{
305 struct file *ret;
306
307 lockdep_assert_held(&f->fi_lock);
308
309 ret = __nfs4_get_fd(f, O_WRONLY);
310 if (!ret)
311 ret = __nfs4_get_fd(f, O_RDWR);
312 return ret;
313}
314
315static struct file *
316find_writeable_file(struct nfs4_file *f)
317{
318 struct file *ret;
319
320 spin_lock(&f->fi_lock);
321 ret = find_writeable_file_locked(f);
322 spin_unlock(&f->fi_lock);
323
324 return ret;
325}
326
327static struct file *find_readable_file_locked(struct nfs4_file *f)
328{
329 struct file *ret;
330
331 lockdep_assert_held(&f->fi_lock);
332
333 ret = __nfs4_get_fd(f, O_RDONLY);
334 if (!ret)
335 ret = __nfs4_get_fd(f, O_RDWR);
336 return ret;
337}
338
339static struct file *
340find_readable_file(struct nfs4_file *f)
341{
342 struct file *ret;
343
344 spin_lock(&f->fi_lock);
345 ret = find_readable_file_locked(f);
346 spin_unlock(&f->fi_lock);
347
348 return ret;
349}
350
351static struct file *
352find_any_file(struct nfs4_file *f)
353{
354 struct file *ret;
355
356 spin_lock(&f->fi_lock);
357 ret = __nfs4_get_fd(f, O_RDWR);
358 if (!ret) {
359 ret = __nfs4_get_fd(f, O_WRONLY);
360 if (!ret)
361 ret = __nfs4_get_fd(f, O_RDONLY);
362 }
363 spin_unlock(&f->fi_lock);
364 return ret;
365}
366
367static atomic_long_t num_delegations;
254unsigned long max_delegations; 368unsigned long max_delegations;
255 369
256/* 370/*
@@ -262,12 +376,11 @@ unsigned long max_delegations;
262#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 376#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
263#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 377#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
264 378
265static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername) 379static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
266{ 380{
267 unsigned int ret; 381 unsigned int ret;
268 382
269 ret = opaque_hashval(ownername->data, ownername->len); 383 ret = opaque_hashval(ownername->data, ownername->len);
270 ret += clientid;
271 return ret & OWNER_HASH_MASK; 384 return ret & OWNER_HASH_MASK;
272} 385}
273 386
@@ -275,75 +388,124 @@ static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
275#define FILE_HASH_BITS 8 388#define FILE_HASH_BITS 8
276#define FILE_HASH_SIZE (1 << FILE_HASH_BITS) 389#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
277 390
278static unsigned int file_hashval(struct inode *ino) 391static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
392{
393 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
394}
395
396static unsigned int file_hashval(struct knfsd_fh *fh)
397{
398 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
399}
400
401static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
279{ 402{
280 /* XXX: why are we hashing on inode pointer, anyway? */ 403 return fh1->fh_size == fh2->fh_size &&
281 return hash_ptr(ino, FILE_HASH_BITS); 404 !memcmp(fh1->fh_base.fh_pad,
405 fh2->fh_base.fh_pad,
406 fh1->fh_size);
282} 407}
283 408
284static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; 409static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
285 410
286static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) 411static void
412__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
287{ 413{
288 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR])); 414 lockdep_assert_held(&fp->fi_lock);
289 atomic_inc(&fp->fi_access[oflag]); 415
416 if (access & NFS4_SHARE_ACCESS_WRITE)
417 atomic_inc(&fp->fi_access[O_WRONLY]);
418 if (access & NFS4_SHARE_ACCESS_READ)
419 atomic_inc(&fp->fi_access[O_RDONLY]);
290} 420}
291 421
292static void nfs4_file_get_access(struct nfs4_file *fp, int oflag) 422static __be32
423nfs4_file_get_access(struct nfs4_file *fp, u32 access)
293{ 424{
294 if (oflag == O_RDWR) { 425 lockdep_assert_held(&fp->fi_lock);
295 __nfs4_file_get_access(fp, O_RDONLY); 426
296 __nfs4_file_get_access(fp, O_WRONLY); 427 /* Does this access mode make sense? */
297 } else 428 if (access & ~NFS4_SHARE_ACCESS_BOTH)
298 __nfs4_file_get_access(fp, oflag); 429 return nfserr_inval;
430
431 /* Does it conflict with a deny mode already set? */
432 if ((access & fp->fi_share_deny) != 0)
433 return nfserr_share_denied;
434
435 __nfs4_file_get_access(fp, access);
436 return nfs_ok;
299} 437}
300 438
301static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag) 439static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
302{ 440{
303 if (fp->fi_fds[oflag]) { 441 /* Common case is that there is no deny mode. */
304 fput(fp->fi_fds[oflag]); 442 if (deny) {
305 fp->fi_fds[oflag] = NULL; 443 /* Does this deny mode make sense? */
444 if (deny & ~NFS4_SHARE_DENY_BOTH)
445 return nfserr_inval;
446
447 if ((deny & NFS4_SHARE_DENY_READ) &&
448 atomic_read(&fp->fi_access[O_RDONLY]))
449 return nfserr_share_denied;
450
451 if ((deny & NFS4_SHARE_DENY_WRITE) &&
452 atomic_read(&fp->fi_access[O_WRONLY]))
453 return nfserr_share_denied;
306 } 454 }
455 return nfs_ok;
307} 456}
308 457
309static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 458static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
310{ 459{
311 if (atomic_dec_and_test(&fp->fi_access[oflag])) { 460 might_lock(&fp->fi_lock);
312 nfs4_file_put_fd(fp, oflag); 461
462 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
463 struct file *f1 = NULL;
464 struct file *f2 = NULL;
465
466 swap(f1, fp->fi_fds[oflag]);
313 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) 467 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
314 nfs4_file_put_fd(fp, O_RDWR); 468 swap(f2, fp->fi_fds[O_RDWR]);
469 spin_unlock(&fp->fi_lock);
470 if (f1)
471 fput(f1);
472 if (f2)
473 fput(f2);
315 } 474 }
316} 475}
317 476
318static void nfs4_file_put_access(struct nfs4_file *fp, int oflag) 477static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
319{ 478{
320 if (oflag == O_RDWR) { 479 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
321 __nfs4_file_put_access(fp, O_RDONLY); 480
481 if (access & NFS4_SHARE_ACCESS_WRITE)
322 __nfs4_file_put_access(fp, O_WRONLY); 482 __nfs4_file_put_access(fp, O_WRONLY);
323 } else 483 if (access & NFS4_SHARE_ACCESS_READ)
324 __nfs4_file_put_access(fp, oflag); 484 __nfs4_file_put_access(fp, O_RDONLY);
325} 485}
326 486
327static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct 487static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
328kmem_cache *slab) 488 struct kmem_cache *slab)
329{ 489{
330 struct idr *stateids = &cl->cl_stateids;
331 struct nfs4_stid *stid; 490 struct nfs4_stid *stid;
332 int new_id; 491 int new_id;
333 492
334 stid = kmem_cache_alloc(slab, GFP_KERNEL); 493 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
335 if (!stid) 494 if (!stid)
336 return NULL; 495 return NULL;
337 496
338 new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL); 497 idr_preload(GFP_KERNEL);
498 spin_lock(&cl->cl_lock);
499 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
500 spin_unlock(&cl->cl_lock);
501 idr_preload_end();
339 if (new_id < 0) 502 if (new_id < 0)
340 goto out_free; 503 goto out_free;
341 stid->sc_client = cl; 504 stid->sc_client = cl;
342 stid->sc_type = 0;
343 stid->sc_stateid.si_opaque.so_id = new_id; 505 stid->sc_stateid.si_opaque.so_id = new_id;
344 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; 506 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
345 /* Will be incremented before return to client: */ 507 /* Will be incremented before return to client: */
346 stid->sc_stateid.si_generation = 0; 508 atomic_set(&stid->sc_count, 1);
347 509
348 /* 510 /*
349 * It shouldn't be a problem to reuse an opaque stateid value. 511 * It shouldn't be a problem to reuse an opaque stateid value.
@@ -360,9 +522,24 @@ out_free:
360 return NULL; 522 return NULL;
361} 523}
362 524
363static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp) 525static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
364{ 526{
365 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab)); 527 struct nfs4_stid *stid;
528 struct nfs4_ol_stateid *stp;
529
530 stid = nfs4_alloc_stid(clp, stateid_slab);
531 if (!stid)
532 return NULL;
533
534 stp = openlockstateid(stid);
535 stp->st_stid.sc_free = nfs4_free_ol_stateid;
536 return stp;
537}
538
539static void nfs4_free_deleg(struct nfs4_stid *stid)
540{
541 kmem_cache_free(deleg_slab, stid);
542 atomic_long_dec(&num_delegations);
366} 543}
367 544
368/* 545/*
@@ -379,10 +556,11 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
379 * Each filter is 256 bits. We hash the filehandle to 32bit and use the 556 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
380 * low 3 bytes as hash-table indices. 557 * low 3 bytes as hash-table indices.
381 * 558 *
382 * 'state_lock', which is always held when block_delegations() is called, 559 * 'blocked_delegations_lock', which is always taken in block_delegations(),
383 * is used to manage concurrent access. Testing does not need the lock 560 * is used to manage concurrent access. Testing does not need the lock
384 * except when swapping the two filters. 561 * except when swapping the two filters.
385 */ 562 */
563static DEFINE_SPINLOCK(blocked_delegations_lock);
386static struct bloom_pair { 564static struct bloom_pair {
387 int entries, old_entries; 565 int entries, old_entries;
388 time_t swap_time; 566 time_t swap_time;
@@ -398,7 +576,7 @@ static int delegation_blocked(struct knfsd_fh *fh)
398 if (bd->entries == 0) 576 if (bd->entries == 0)
399 return 0; 577 return 0;
400 if (seconds_since_boot() - bd->swap_time > 30) { 578 if (seconds_since_boot() - bd->swap_time > 30) {
401 spin_lock(&state_lock); 579 spin_lock(&blocked_delegations_lock);
402 if (seconds_since_boot() - bd->swap_time > 30) { 580 if (seconds_since_boot() - bd->swap_time > 30) {
403 bd->entries -= bd->old_entries; 581 bd->entries -= bd->old_entries;
404 bd->old_entries = bd->entries; 582 bd->old_entries = bd->entries;
@@ -407,7 +585,7 @@ static int delegation_blocked(struct knfsd_fh *fh)
407 bd->new = 1-bd->new; 585 bd->new = 1-bd->new;
408 bd->swap_time = seconds_since_boot(); 586 bd->swap_time = seconds_since_boot();
409 } 587 }
410 spin_unlock(&state_lock); 588 spin_unlock(&blocked_delegations_lock);
411 } 589 }
412 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); 590 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
413 if (test_bit(hash&255, bd->set[0]) && 591 if (test_bit(hash&255, bd->set[0]) &&
@@ -430,69 +608,73 @@ static void block_delegations(struct knfsd_fh *fh)
430 608
431 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0); 609 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
432 610
611 spin_lock(&blocked_delegations_lock);
433 __set_bit(hash&255, bd->set[bd->new]); 612 __set_bit(hash&255, bd->set[bd->new]);
434 __set_bit((hash>>8)&255, bd->set[bd->new]); 613 __set_bit((hash>>8)&255, bd->set[bd->new]);
435 __set_bit((hash>>16)&255, bd->set[bd->new]); 614 __set_bit((hash>>16)&255, bd->set[bd->new]);
436 if (bd->entries == 0) 615 if (bd->entries == 0)
437 bd->swap_time = seconds_since_boot(); 616 bd->swap_time = seconds_since_boot();
438 bd->entries += 1; 617 bd->entries += 1;
618 spin_unlock(&blocked_delegations_lock);
439} 619}
440 620
441static struct nfs4_delegation * 621static struct nfs4_delegation *
442alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh) 622alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
443{ 623{
444 struct nfs4_delegation *dp; 624 struct nfs4_delegation *dp;
625 long n;
445 626
446 dprintk("NFSD alloc_init_deleg\n"); 627 dprintk("NFSD alloc_init_deleg\n");
447 if (num_delegations > max_delegations) 628 n = atomic_long_inc_return(&num_delegations);
448 return NULL; 629 if (n < 0 || n > max_delegations)
630 goto out_dec;
449 if (delegation_blocked(&current_fh->fh_handle)) 631 if (delegation_blocked(&current_fh->fh_handle))
450 return NULL; 632 goto out_dec;
451 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab)); 633 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
452 if (dp == NULL) 634 if (dp == NULL)
453 return dp; 635 goto out_dec;
636
637 dp->dl_stid.sc_free = nfs4_free_deleg;
454 /* 638 /*
455 * delegation seqid's are never incremented. The 4.1 special 639 * delegation seqid's are never incremented. The 4.1 special
456 * meaning of seqid 0 isn't meaningful, really, but let's avoid 640 * meaning of seqid 0 isn't meaningful, really, but let's avoid
457 * 0 anyway just for consistency and use 1: 641 * 0 anyway just for consistency and use 1:
458 */ 642 */
459 dp->dl_stid.sc_stateid.si_generation = 1; 643 dp->dl_stid.sc_stateid.si_generation = 1;
460 num_delegations++;
461 INIT_LIST_HEAD(&dp->dl_perfile); 644 INIT_LIST_HEAD(&dp->dl_perfile);
462 INIT_LIST_HEAD(&dp->dl_perclnt); 645 INIT_LIST_HEAD(&dp->dl_perclnt);
463 INIT_LIST_HEAD(&dp->dl_recall_lru); 646 INIT_LIST_HEAD(&dp->dl_recall_lru);
464 dp->dl_file = NULL;
465 dp->dl_type = NFS4_OPEN_DELEGATE_READ; 647 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
466 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle); 648 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
467 dp->dl_time = 0;
468 atomic_set(&dp->dl_count, 1);
469 nfsd4_init_callback(&dp->dl_recall);
470 return dp; 649 return dp;
650out_dec:
651 atomic_long_dec(&num_delegations);
652 return NULL;
471} 653}
472 654
473static void remove_stid(struct nfs4_stid *s) 655void
656nfs4_put_stid(struct nfs4_stid *s)
474{ 657{
475 struct idr *stateids = &s->sc_client->cl_stateids; 658 struct nfs4_file *fp = s->sc_file;
659 struct nfs4_client *clp = s->sc_client;
476 660
477 idr_remove(stateids, s->sc_stateid.si_opaque.so_id); 661 might_lock(&clp->cl_lock);
478}
479 662
480static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s) 663 if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
481{ 664 wake_up_all(&close_wq);
482 kmem_cache_free(slab, s); 665 return;
483}
484
485void
486nfs4_put_delegation(struct nfs4_delegation *dp)
487{
488 if (atomic_dec_and_test(&dp->dl_count)) {
489 nfs4_free_stid(deleg_slab, &dp->dl_stid);
490 num_delegations--;
491 } 666 }
667 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
668 spin_unlock(&clp->cl_lock);
669 s->sc_free(s);
670 if (fp)
671 put_nfs4_file(fp);
492} 672}
493 673
494static void nfs4_put_deleg_lease(struct nfs4_file *fp) 674static void nfs4_put_deleg_lease(struct nfs4_file *fp)
495{ 675{
676 lockdep_assert_held(&state_lock);
677
496 if (!fp->fi_lease) 678 if (!fp->fi_lease)
497 return; 679 return;
498 if (atomic_dec_and_test(&fp->fi_delegees)) { 680 if (atomic_dec_and_test(&fp->fi_delegees)) {
@@ -512,54 +694,54 @@ static void
512hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) 694hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
513{ 695{
514 lockdep_assert_held(&state_lock); 696 lockdep_assert_held(&state_lock);
697 lockdep_assert_held(&fp->fi_lock);
515 698
699 atomic_inc(&dp->dl_stid.sc_count);
516 dp->dl_stid.sc_type = NFS4_DELEG_STID; 700 dp->dl_stid.sc_type = NFS4_DELEG_STID;
517 list_add(&dp->dl_perfile, &fp->fi_delegations); 701 list_add(&dp->dl_perfile, &fp->fi_delegations);
518 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); 702 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
519} 703}
520 704
521/* Called under the state lock. */
522static void 705static void
523unhash_delegation(struct nfs4_delegation *dp) 706unhash_delegation_locked(struct nfs4_delegation *dp)
524{ 707{
525 spin_lock(&state_lock); 708 struct nfs4_file *fp = dp->dl_stid.sc_file;
526 list_del_init(&dp->dl_perclnt);
527 list_del_init(&dp->dl_perfile);
528 list_del_init(&dp->dl_recall_lru);
529 spin_unlock(&state_lock);
530 if (dp->dl_file) {
531 nfs4_put_deleg_lease(dp->dl_file);
532 put_nfs4_file(dp->dl_file);
533 dp->dl_file = NULL;
534 }
535}
536
537 709
710 lockdep_assert_held(&state_lock);
538 711
539static void destroy_revoked_delegation(struct nfs4_delegation *dp) 712 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
540{ 713 /* Ensure that deleg break won't try to requeue it */
714 ++dp->dl_time;
715 spin_lock(&fp->fi_lock);
716 list_del_init(&dp->dl_perclnt);
541 list_del_init(&dp->dl_recall_lru); 717 list_del_init(&dp->dl_recall_lru);
542 remove_stid(&dp->dl_stid); 718 list_del_init(&dp->dl_perfile);
543 nfs4_put_delegation(dp); 719 spin_unlock(&fp->fi_lock);
720 if (fp)
721 nfs4_put_deleg_lease(fp);
544} 722}
545 723
546static void destroy_delegation(struct nfs4_delegation *dp) 724static void destroy_delegation(struct nfs4_delegation *dp)
547{ 725{
548 unhash_delegation(dp); 726 spin_lock(&state_lock);
549 remove_stid(&dp->dl_stid); 727 unhash_delegation_locked(dp);
550 nfs4_put_delegation(dp); 728 spin_unlock(&state_lock);
729 nfs4_put_stid(&dp->dl_stid);
551} 730}
552 731
553static void revoke_delegation(struct nfs4_delegation *dp) 732static void revoke_delegation(struct nfs4_delegation *dp)
554{ 733{
555 struct nfs4_client *clp = dp->dl_stid.sc_client; 734 struct nfs4_client *clp = dp->dl_stid.sc_client;
556 735
736 WARN_ON(!list_empty(&dp->dl_recall_lru));
737
557 if (clp->cl_minorversion == 0) 738 if (clp->cl_minorversion == 0)
558 destroy_delegation(dp); 739 nfs4_put_stid(&dp->dl_stid);
559 else { 740 else {
560 unhash_delegation(dp);
561 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; 741 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
742 spin_lock(&clp->cl_lock);
562 list_add(&dp->dl_recall_lru, &clp->cl_revoked); 743 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
744 spin_unlock(&clp->cl_lock);
563 } 745 }
564} 746}
565 747
@@ -607,57 +789,62 @@ bmap_to_share_mode(unsigned long bmap) {
607 return access; 789 return access;
608} 790}
609 791
610static bool
611test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
612 unsigned int access, deny;
613
614 access = bmap_to_share_mode(stp->st_access_bmap);
615 deny = bmap_to_share_mode(stp->st_deny_bmap);
616 if ((access & open->op_share_deny) || (deny & open->op_share_access))
617 return false;
618 return true;
619}
620
621/* set share access for a given stateid */ 792/* set share access for a given stateid */
622static inline void 793static inline void
623set_access(u32 access, struct nfs4_ol_stateid *stp) 794set_access(u32 access, struct nfs4_ol_stateid *stp)
624{ 795{
625 __set_bit(access, &stp->st_access_bmap); 796 unsigned char mask = 1 << access;
797
798 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
799 stp->st_access_bmap |= mask;
626} 800}
627 801
628/* clear share access for a given stateid */ 802/* clear share access for a given stateid */
629static inline void 803static inline void
630clear_access(u32 access, struct nfs4_ol_stateid *stp) 804clear_access(u32 access, struct nfs4_ol_stateid *stp)
631{ 805{
632 __clear_bit(access, &stp->st_access_bmap); 806 unsigned char mask = 1 << access;
807
808 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
809 stp->st_access_bmap &= ~mask;
633} 810}
634 811
635/* test whether a given stateid has access */ 812/* test whether a given stateid has access */
636static inline bool 813static inline bool
637test_access(u32 access, struct nfs4_ol_stateid *stp) 814test_access(u32 access, struct nfs4_ol_stateid *stp)
638{ 815{
639 return test_bit(access, &stp->st_access_bmap); 816 unsigned char mask = 1 << access;
817
818 return (bool)(stp->st_access_bmap & mask);
640} 819}
641 820
642/* set share deny for a given stateid */ 821/* set share deny for a given stateid */
643static inline void 822static inline void
644set_deny(u32 access, struct nfs4_ol_stateid *stp) 823set_deny(u32 deny, struct nfs4_ol_stateid *stp)
645{ 824{
646 __set_bit(access, &stp->st_deny_bmap); 825 unsigned char mask = 1 << deny;
826
827 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
828 stp->st_deny_bmap |= mask;
647} 829}
648 830
649/* clear share deny for a given stateid */ 831/* clear share deny for a given stateid */
650static inline void 832static inline void
651clear_deny(u32 access, struct nfs4_ol_stateid *stp) 833clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
652{ 834{
653 __clear_bit(access, &stp->st_deny_bmap); 835 unsigned char mask = 1 << deny;
836
837 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
838 stp->st_deny_bmap &= ~mask;
654} 839}
655 840
656/* test whether a given stateid is denying specific access */ 841/* test whether a given stateid is denying specific access */
657static inline bool 842static inline bool
658test_deny(u32 access, struct nfs4_ol_stateid *stp) 843test_deny(u32 deny, struct nfs4_ol_stateid *stp)
659{ 844{
660 return test_bit(access, &stp->st_deny_bmap); 845 unsigned char mask = 1 << deny;
846
847 return (bool)(stp->st_deny_bmap & mask);
661} 848}
662 849
663static int nfs4_access_to_omode(u32 access) 850static int nfs4_access_to_omode(u32 access)
@@ -674,138 +861,283 @@ static int nfs4_access_to_omode(u32 access)
674 return O_RDONLY; 861 return O_RDONLY;
675} 862}
676 863
864/*
865 * A stateid that had a deny mode associated with it is being released
866 * or downgraded. Recalculate the deny mode on the file.
867 */
868static void
869recalculate_deny_mode(struct nfs4_file *fp)
870{
871 struct nfs4_ol_stateid *stp;
872
873 spin_lock(&fp->fi_lock);
874 fp->fi_share_deny = 0;
875 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
876 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
877 spin_unlock(&fp->fi_lock);
878}
879
880static void
881reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
882{
883 int i;
884 bool change = false;
885
886 for (i = 1; i < 4; i++) {
887 if ((i & deny) != i) {
888 change = true;
889 clear_deny(i, stp);
890 }
891 }
892
893 /* Recalculate per-file deny mode if there was a change */
894 if (change)
895 recalculate_deny_mode(stp->st_stid.sc_file);
896}
897
677/* release all access and file references for a given stateid */ 898/* release all access and file references for a given stateid */
678static void 899static void
679release_all_access(struct nfs4_ol_stateid *stp) 900release_all_access(struct nfs4_ol_stateid *stp)
680{ 901{
681 int i; 902 int i;
903 struct nfs4_file *fp = stp->st_stid.sc_file;
904
905 if (fp && stp->st_deny_bmap != 0)
906 recalculate_deny_mode(fp);
682 907
683 for (i = 1; i < 4; i++) { 908 for (i = 1; i < 4; i++) {
684 if (test_access(i, stp)) 909 if (test_access(i, stp))
685 nfs4_file_put_access(stp->st_file, 910 nfs4_file_put_access(stp->st_stid.sc_file, i);
686 nfs4_access_to_omode(i));
687 clear_access(i, stp); 911 clear_access(i, stp);
688 } 912 }
689} 913}
690 914
691static void unhash_generic_stateid(struct nfs4_ol_stateid *stp) 915static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
692{ 916{
917 struct nfs4_client *clp = sop->so_client;
918
919 might_lock(&clp->cl_lock);
920
921 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
922 return;
923 sop->so_ops->so_unhash(sop);
924 spin_unlock(&clp->cl_lock);
925 kfree(sop->so_owner.data);
926 sop->so_ops->so_free(sop);
927}
928
929static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
930{
931 struct nfs4_file *fp = stp->st_stid.sc_file;
932
933 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
934
935 spin_lock(&fp->fi_lock);
693 list_del(&stp->st_perfile); 936 list_del(&stp->st_perfile);
937 spin_unlock(&fp->fi_lock);
694 list_del(&stp->st_perstateowner); 938 list_del(&stp->st_perstateowner);
695} 939}
696 940
697static void close_generic_stateid(struct nfs4_ol_stateid *stp) 941static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
698{ 942{
943 struct nfs4_ol_stateid *stp = openlockstateid(stid);
944
699 release_all_access(stp); 945 release_all_access(stp);
700 put_nfs4_file(stp->st_file); 946 if (stp->st_stateowner)
701 stp->st_file = NULL; 947 nfs4_put_stateowner(stp->st_stateowner);
948 kmem_cache_free(stateid_slab, stid);
702} 949}
703 950
704static void free_generic_stateid(struct nfs4_ol_stateid *stp) 951static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
705{ 952{
706 remove_stid(&stp->st_stid); 953 struct nfs4_ol_stateid *stp = openlockstateid(stid);
707 nfs4_free_stid(stateid_slab, &stp->st_stid); 954 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
955 struct file *file;
956
957 file = find_any_file(stp->st_stid.sc_file);
958 if (file)
959 filp_close(file, (fl_owner_t)lo);
960 nfs4_free_ol_stateid(stid);
708} 961}
709 962
710static void release_lock_stateid(struct nfs4_ol_stateid *stp) 963/*
964 * Put the persistent reference to an already unhashed generic stateid, while
965 * holding the cl_lock. If it's the last reference, then put it onto the
966 * reaplist for later destruction.
967 */
968static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
969 struct list_head *reaplist)
711{ 970{
712 struct file *file; 971 struct nfs4_stid *s = &stp->st_stid;
972 struct nfs4_client *clp = s->sc_client;
973
974 lockdep_assert_held(&clp->cl_lock);
713 975
714 unhash_generic_stateid(stp); 976 WARN_ON_ONCE(!list_empty(&stp->st_locks));
977
978 if (!atomic_dec_and_test(&s->sc_count)) {
979 wake_up_all(&close_wq);
980 return;
981 }
982
983 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
984 list_add(&stp->st_locks, reaplist);
985}
986
987static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
988{
989 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
990
991 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
992
993 list_del_init(&stp->st_locks);
994 unhash_ol_stateid(stp);
715 unhash_stid(&stp->st_stid); 995 unhash_stid(&stp->st_stid);
716 file = find_any_file(stp->st_file);
717 if (file)
718 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
719 close_generic_stateid(stp);
720 free_generic_stateid(stp);
721} 996}
722 997
723static void unhash_lockowner(struct nfs4_lockowner *lo) 998static void release_lock_stateid(struct nfs4_ol_stateid *stp)
724{ 999{
725 struct nfs4_ol_stateid *stp; 1000 struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
726 1001
727 list_del(&lo->lo_owner.so_strhash); 1002 spin_lock(&oo->oo_owner.so_client->cl_lock);
728 list_del(&lo->lo_perstateid); 1003 unhash_lock_stateid(stp);
729 list_del(&lo->lo_owner_ino_hash); 1004 spin_unlock(&oo->oo_owner.so_client->cl_lock);
730 while (!list_empty(&lo->lo_owner.so_stateids)) { 1005 nfs4_put_stid(&stp->st_stid);
731 stp = list_first_entry(&lo->lo_owner.so_stateids,
732 struct nfs4_ol_stateid, st_perstateowner);
733 release_lock_stateid(stp);
734 }
735} 1006}
736 1007
737static void nfs4_free_lockowner(struct nfs4_lockowner *lo) 1008static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
738{ 1009{
739 kfree(lo->lo_owner.so_owner.data); 1010 struct nfs4_client *clp = lo->lo_owner.so_client;
740 kmem_cache_free(lockowner_slab, lo); 1011
1012 lockdep_assert_held(&clp->cl_lock);
1013
1014 list_del_init(&lo->lo_owner.so_strhash);
1015}
1016
1017/*
1018 * Free a list of generic stateids that were collected earlier after being
1019 * fully unhashed.
1020 */
1021static void
1022free_ol_stateid_reaplist(struct list_head *reaplist)
1023{
1024 struct nfs4_ol_stateid *stp;
1025 struct nfs4_file *fp;
1026
1027 might_sleep();
1028
1029 while (!list_empty(reaplist)) {
1030 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1031 st_locks);
1032 list_del(&stp->st_locks);
1033 fp = stp->st_stid.sc_file;
1034 stp->st_stid.sc_free(&stp->st_stid);
1035 if (fp)
1036 put_nfs4_file(fp);
1037 }
741} 1038}
742 1039
743static void release_lockowner(struct nfs4_lockowner *lo) 1040static void release_lockowner(struct nfs4_lockowner *lo)
744{ 1041{
745 unhash_lockowner(lo); 1042 struct nfs4_client *clp = lo->lo_owner.so_client;
746 nfs4_free_lockowner(lo); 1043 struct nfs4_ol_stateid *stp;
1044 struct list_head reaplist;
1045
1046 INIT_LIST_HEAD(&reaplist);
1047
1048 spin_lock(&clp->cl_lock);
1049 unhash_lockowner_locked(lo);
1050 while (!list_empty(&lo->lo_owner.so_stateids)) {
1051 stp = list_first_entry(&lo->lo_owner.so_stateids,
1052 struct nfs4_ol_stateid, st_perstateowner);
1053 unhash_lock_stateid(stp);
1054 put_ol_stateid_locked(stp, &reaplist);
1055 }
1056 spin_unlock(&clp->cl_lock);
1057 free_ol_stateid_reaplist(&reaplist);
1058 nfs4_put_stateowner(&lo->lo_owner);
747} 1059}
748 1060
749static void 1061static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
750release_stateid_lockowners(struct nfs4_ol_stateid *open_stp) 1062 struct list_head *reaplist)
751{ 1063{
752 struct nfs4_lockowner *lo; 1064 struct nfs4_ol_stateid *stp;
753 1065
754 while (!list_empty(&open_stp->st_lockowners)) { 1066 while (!list_empty(&open_stp->st_locks)) {
755 lo = list_entry(open_stp->st_lockowners.next, 1067 stp = list_entry(open_stp->st_locks.next,
756 struct nfs4_lockowner, lo_perstateid); 1068 struct nfs4_ol_stateid, st_locks);
757 release_lockowner(lo); 1069 unhash_lock_stateid(stp);
1070 put_ol_stateid_locked(stp, reaplist);
758 } 1071 }
759} 1072}
760 1073
761static void unhash_open_stateid(struct nfs4_ol_stateid *stp) 1074static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
1075 struct list_head *reaplist)
762{ 1076{
763 unhash_generic_stateid(stp); 1077 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
764 release_stateid_lockowners(stp); 1078
765 close_generic_stateid(stp); 1079 unhash_ol_stateid(stp);
1080 release_open_stateid_locks(stp, reaplist);
766} 1081}
767 1082
768static void release_open_stateid(struct nfs4_ol_stateid *stp) 1083static void release_open_stateid(struct nfs4_ol_stateid *stp)
769{ 1084{
770 unhash_open_stateid(stp); 1085 LIST_HEAD(reaplist);
771 free_generic_stateid(stp); 1086
1087 spin_lock(&stp->st_stid.sc_client->cl_lock);
1088 unhash_open_stateid(stp, &reaplist);
1089 put_ol_stateid_locked(stp, &reaplist);
1090 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1091 free_ol_stateid_reaplist(&reaplist);
772} 1092}
773 1093
774static void unhash_openowner(struct nfs4_openowner *oo) 1094static void unhash_openowner_locked(struct nfs4_openowner *oo)
775{ 1095{
776 struct nfs4_ol_stateid *stp; 1096 struct nfs4_client *clp = oo->oo_owner.so_client;
777 1097
778 list_del(&oo->oo_owner.so_strhash); 1098 lockdep_assert_held(&clp->cl_lock);
779 list_del(&oo->oo_perclient); 1099
780 while (!list_empty(&oo->oo_owner.so_stateids)) { 1100 list_del_init(&oo->oo_owner.so_strhash);
781 stp = list_first_entry(&oo->oo_owner.so_stateids, 1101 list_del_init(&oo->oo_perclient);
782 struct nfs4_ol_stateid, st_perstateowner);
783 release_open_stateid(stp);
784 }
785} 1102}
786 1103
787static void release_last_closed_stateid(struct nfs4_openowner *oo) 1104static void release_last_closed_stateid(struct nfs4_openowner *oo)
788{ 1105{
789 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid; 1106 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1107 nfsd_net_id);
1108 struct nfs4_ol_stateid *s;
790 1109
1110 spin_lock(&nn->client_lock);
1111 s = oo->oo_last_closed_stid;
791 if (s) { 1112 if (s) {
792 free_generic_stateid(s); 1113 list_del_init(&oo->oo_close_lru);
793 oo->oo_last_closed_stid = NULL; 1114 oo->oo_last_closed_stid = NULL;
794 } 1115 }
795} 1116 spin_unlock(&nn->client_lock);
796 1117 if (s)
797static void nfs4_free_openowner(struct nfs4_openowner *oo) 1118 nfs4_put_stid(&s->st_stid);
798{
799 kfree(oo->oo_owner.so_owner.data);
800 kmem_cache_free(openowner_slab, oo);
801} 1119}
802 1120
803static void release_openowner(struct nfs4_openowner *oo) 1121static void release_openowner(struct nfs4_openowner *oo)
804{ 1122{
805 unhash_openowner(oo); 1123 struct nfs4_ol_stateid *stp;
806 list_del(&oo->oo_close_lru); 1124 struct nfs4_client *clp = oo->oo_owner.so_client;
1125 struct list_head reaplist;
1126
1127 INIT_LIST_HEAD(&reaplist);
1128
1129 spin_lock(&clp->cl_lock);
1130 unhash_openowner_locked(oo);
1131 while (!list_empty(&oo->oo_owner.so_stateids)) {
1132 stp = list_first_entry(&oo->oo_owner.so_stateids,
1133 struct nfs4_ol_stateid, st_perstateowner);
1134 unhash_open_stateid(stp, &reaplist);
1135 put_ol_stateid_locked(stp, &reaplist);
1136 }
1137 spin_unlock(&clp->cl_lock);
1138 free_ol_stateid_reaplist(&reaplist);
807 release_last_closed_stateid(oo); 1139 release_last_closed_stateid(oo);
808 nfs4_free_openowner(oo); 1140 nfs4_put_stateowner(&oo->oo_owner);
809} 1141}
810 1142
811static inline int 1143static inline int
@@ -842,7 +1174,7 @@ void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
842 return; 1174 return;
843 1175
844 if (!seqid_mutating_err(ntohl(nfserr))) { 1176 if (!seqid_mutating_err(ntohl(nfserr))) {
845 cstate->replay_owner = NULL; 1177 nfsd4_cstate_clear_replay(cstate);
846 return; 1178 return;
847 } 1179 }
848 if (!so) 1180 if (!so)
@@ -1030,10 +1362,8 @@ static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, str
1030 if (ret) 1362 if (ret)
1031 /* oops; xprt is already down: */ 1363 /* oops; xprt is already down: */
1032 nfsd4_conn_lost(&conn->cn_xpt_user); 1364 nfsd4_conn_lost(&conn->cn_xpt_user);
1033 if (conn->cn_flags & NFS4_CDFC4_BACK) { 1365 /* We may have gained or lost a callback channel: */
1034 /* callback channel may be back up */ 1366 nfsd4_probe_callback_sync(ses->se_client);
1035 nfsd4_probe_callback(ses->se_client);
1036 }
1037} 1367}
1038 1368
1039static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 1369static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
@@ -1073,9 +1403,6 @@ static void __free_session(struct nfsd4_session *ses)
1073 1403
1074static void free_session(struct nfsd4_session *ses) 1404static void free_session(struct nfsd4_session *ses)
1075{ 1405{
1076 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
1077
1078 lockdep_assert_held(&nn->client_lock);
1079 nfsd4_del_conns(ses); 1406 nfsd4_del_conns(ses);
1080 nfsd4_put_drc_mem(&ses->se_fchannel); 1407 nfsd4_put_drc_mem(&ses->se_fchannel);
1081 __free_session(ses); 1408 __free_session(ses);
@@ -1097,12 +1424,10 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
1097 new->se_cb_sec = cses->cb_sec; 1424 new->se_cb_sec = cses->cb_sec;
1098 atomic_set(&new->se_ref, 0); 1425 atomic_set(&new->se_ref, 0);
1099 idx = hash_sessionid(&new->se_sessionid); 1426 idx = hash_sessionid(&new->se_sessionid);
1100 spin_lock(&nn->client_lock);
1101 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 1427 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1102 spin_lock(&clp->cl_lock); 1428 spin_lock(&clp->cl_lock);
1103 list_add(&new->se_perclnt, &clp->cl_sessions); 1429 list_add(&new->se_perclnt, &clp->cl_sessions);
1104 spin_unlock(&clp->cl_lock); 1430 spin_unlock(&clp->cl_lock);
1105 spin_unlock(&nn->client_lock);
1106 1431
1107 if (cses->flags & SESSION4_BACK_CHAN) { 1432 if (cses->flags & SESSION4_BACK_CHAN) {
1108 struct sockaddr *sa = svc_addr(rqstp); 1433 struct sockaddr *sa = svc_addr(rqstp);
@@ -1120,12 +1445,14 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru
1120 1445
1121/* caller must hold client_lock */ 1446/* caller must hold client_lock */
1122static struct nfsd4_session * 1447static struct nfsd4_session *
1123find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 1448__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1124{ 1449{
1125 struct nfsd4_session *elem; 1450 struct nfsd4_session *elem;
1126 int idx; 1451 int idx;
1127 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1452 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1128 1453
1454 lockdep_assert_held(&nn->client_lock);
1455
1129 dump_sessionid(__func__, sessionid); 1456 dump_sessionid(__func__, sessionid);
1130 idx = hash_sessionid(sessionid); 1457 idx = hash_sessionid(sessionid);
1131 /* Search in the appropriate list */ 1458 /* Search in the appropriate list */
@@ -1140,10 +1467,33 @@ find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1140 return NULL; 1467 return NULL;
1141} 1468}
1142 1469
1470static struct nfsd4_session *
1471find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1472 __be32 *ret)
1473{
1474 struct nfsd4_session *session;
1475 __be32 status = nfserr_badsession;
1476
1477 session = __find_in_sessionid_hashtbl(sessionid, net);
1478 if (!session)
1479 goto out;
1480 status = nfsd4_get_session_locked(session);
1481 if (status)
1482 session = NULL;
1483out:
1484 *ret = status;
1485 return session;
1486}
1487
1143/* caller must hold client_lock */ 1488/* caller must hold client_lock */
1144static void 1489static void
1145unhash_session(struct nfsd4_session *ses) 1490unhash_session(struct nfsd4_session *ses)
1146{ 1491{
1492 struct nfs4_client *clp = ses->se_client;
1493 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1494
1495 lockdep_assert_held(&nn->client_lock);
1496
1147 list_del(&ses->se_hash); 1497 list_del(&ses->se_hash);
1148 spin_lock(&ses->se_client->cl_lock); 1498 spin_lock(&ses->se_client->cl_lock);
1149 list_del(&ses->se_perclnt); 1499 list_del(&ses->se_perclnt);
@@ -1169,15 +1519,20 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1169static struct nfs4_client *alloc_client(struct xdr_netobj name) 1519static struct nfs4_client *alloc_client(struct xdr_netobj name)
1170{ 1520{
1171 struct nfs4_client *clp; 1521 struct nfs4_client *clp;
1522 int i;
1172 1523
1173 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL); 1524 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1174 if (clp == NULL) 1525 if (clp == NULL)
1175 return NULL; 1526 return NULL;
1176 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL); 1527 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1177 if (clp->cl_name.data == NULL) { 1528 if (clp->cl_name.data == NULL)
1178 kfree(clp); 1529 goto err_no_name;
1179 return NULL; 1530 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1180 } 1531 OWNER_HASH_SIZE, GFP_KERNEL);
1532 if (!clp->cl_ownerstr_hashtbl)
1533 goto err_no_hashtbl;
1534 for (i = 0; i < OWNER_HASH_SIZE; i++)
1535 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1181 clp->cl_name.len = name.len; 1536 clp->cl_name.len = name.len;
1182 INIT_LIST_HEAD(&clp->cl_sessions); 1537 INIT_LIST_HEAD(&clp->cl_sessions);
1183 idr_init(&clp->cl_stateids); 1538 idr_init(&clp->cl_stateids);
@@ -1192,14 +1547,16 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
1192 spin_lock_init(&clp->cl_lock); 1547 spin_lock_init(&clp->cl_lock);
1193 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1548 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1194 return clp; 1549 return clp;
1550err_no_hashtbl:
1551 kfree(clp->cl_name.data);
1552err_no_name:
1553 kfree(clp);
1554 return NULL;
1195} 1555}
1196 1556
1197static void 1557static void
1198free_client(struct nfs4_client *clp) 1558free_client(struct nfs4_client *clp)
1199{ 1559{
1200 struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
1201
1202 lockdep_assert_held(&nn->client_lock);
1203 while (!list_empty(&clp->cl_sessions)) { 1560 while (!list_empty(&clp->cl_sessions)) {
1204 struct nfsd4_session *ses; 1561 struct nfsd4_session *ses;
1205 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1562 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1210,18 +1567,32 @@ free_client(struct nfs4_client *clp)
1210 } 1567 }
1211 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 1568 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1212 free_svc_cred(&clp->cl_cred); 1569 free_svc_cred(&clp->cl_cred);
1570 kfree(clp->cl_ownerstr_hashtbl);
1213 kfree(clp->cl_name.data); 1571 kfree(clp->cl_name.data);
1214 idr_destroy(&clp->cl_stateids); 1572 idr_destroy(&clp->cl_stateids);
1215 kfree(clp); 1573 kfree(clp);
1216} 1574}
1217 1575
1218/* must be called under the client_lock */ 1576/* must be called under the client_lock */
1219static inline void 1577static void
1220unhash_client_locked(struct nfs4_client *clp) 1578unhash_client_locked(struct nfs4_client *clp)
1221{ 1579{
1580 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1222 struct nfsd4_session *ses; 1581 struct nfsd4_session *ses;
1223 1582
1224 list_del(&clp->cl_lru); 1583 lockdep_assert_held(&nn->client_lock);
1584
1585 /* Mark the client as expired! */
1586 clp->cl_time = 0;
1587 /* Make it invisible */
1588 if (!list_empty(&clp->cl_idhash)) {
1589 list_del_init(&clp->cl_idhash);
1590 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1591 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1592 else
1593 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1594 }
1595 list_del_init(&clp->cl_lru);
1225 spin_lock(&clp->cl_lock); 1596 spin_lock(&clp->cl_lock);
1226 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 1597 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1227 list_del_init(&ses->se_hash); 1598 list_del_init(&ses->se_hash);
@@ -1229,53 +1600,71 @@ unhash_client_locked(struct nfs4_client *clp)
1229} 1600}
1230 1601
1231static void 1602static void
1232destroy_client(struct nfs4_client *clp) 1603unhash_client(struct nfs4_client *clp)
1604{
1605 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1606
1607 spin_lock(&nn->client_lock);
1608 unhash_client_locked(clp);
1609 spin_unlock(&nn->client_lock);
1610}
1611
1612static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1613{
1614 if (atomic_read(&clp->cl_refcount))
1615 return nfserr_jukebox;
1616 unhash_client_locked(clp);
1617 return nfs_ok;
1618}
1619
1620static void
1621__destroy_client(struct nfs4_client *clp)
1233{ 1622{
1234 struct nfs4_openowner *oo; 1623 struct nfs4_openowner *oo;
1235 struct nfs4_delegation *dp; 1624 struct nfs4_delegation *dp;
1236 struct list_head reaplist; 1625 struct list_head reaplist;
1237 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1238 1626
1239 INIT_LIST_HEAD(&reaplist); 1627 INIT_LIST_HEAD(&reaplist);
1240 spin_lock(&state_lock); 1628 spin_lock(&state_lock);
1241 while (!list_empty(&clp->cl_delegations)) { 1629 while (!list_empty(&clp->cl_delegations)) {
1242 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 1630 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1243 list_del_init(&dp->dl_perclnt); 1631 unhash_delegation_locked(dp);
1244 list_move(&dp->dl_recall_lru, &reaplist); 1632 list_add(&dp->dl_recall_lru, &reaplist);
1245 } 1633 }
1246 spin_unlock(&state_lock); 1634 spin_unlock(&state_lock);
1247 while (!list_empty(&reaplist)) { 1635 while (!list_empty(&reaplist)) {
1248 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1636 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1249 destroy_delegation(dp); 1637 list_del_init(&dp->dl_recall_lru);
1638 nfs4_put_stid(&dp->dl_stid);
1250 } 1639 }
1251 list_splice_init(&clp->cl_revoked, &reaplist); 1640 while (!list_empty(&clp->cl_revoked)) {
1252 while (!list_empty(&reaplist)) {
1253 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 1641 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1254 destroy_revoked_delegation(dp); 1642 list_del_init(&dp->dl_recall_lru);
1643 nfs4_put_stid(&dp->dl_stid);
1255 } 1644 }
1256 while (!list_empty(&clp->cl_openowners)) { 1645 while (!list_empty(&clp->cl_openowners)) {
1257 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 1646 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1647 atomic_inc(&oo->oo_owner.so_count);
1258 release_openowner(oo); 1648 release_openowner(oo);
1259 } 1649 }
1260 nfsd4_shutdown_callback(clp); 1650 nfsd4_shutdown_callback(clp);
1261 if (clp->cl_cb_conn.cb_xprt) 1651 if (clp->cl_cb_conn.cb_xprt)
1262 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 1652 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1263 list_del(&clp->cl_idhash);
1264 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1265 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1266 else
1267 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1268 spin_lock(&nn->client_lock);
1269 unhash_client_locked(clp);
1270 WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
1271 free_client(clp); 1653 free_client(clp);
1272 spin_unlock(&nn->client_lock); 1654}
1655
1656static void
1657destroy_client(struct nfs4_client *clp)
1658{
1659 unhash_client(clp);
1660 __destroy_client(clp);
1273} 1661}
1274 1662
1275static void expire_client(struct nfs4_client *clp) 1663static void expire_client(struct nfs4_client *clp)
1276{ 1664{
1665 unhash_client(clp);
1277 nfsd4_client_record_remove(clp); 1666 nfsd4_client_record_remove(clp);
1278 destroy_client(clp); 1667 __destroy_client(clp);
1279} 1668}
1280 1669
1281static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 1670static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
@@ -1408,25 +1797,28 @@ static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1408 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 1797 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1409} 1798}
1410 1799
1411static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 1800static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
1412{ 1801{
1413 static u32 current_clientid = 1; 1802 __be32 verf[2];
1414 1803
1415 clp->cl_clientid.cl_boot = nn->boot_time; 1804 /*
1416 clp->cl_clientid.cl_id = current_clientid++; 1805 * This is opaque to client, so no need to byte-swap. Use
1806 * __force to keep sparse happy
1807 */
1808 verf[0] = (__force __be32)get_seconds();
1809 verf[1] = (__force __be32)nn->clientid_counter;
1810 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1417} 1811}
1418 1812
1419static void gen_confirm(struct nfs4_client *clp) 1813static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1420{ 1814{
1421 __be32 verf[2]; 1815 clp->cl_clientid.cl_boot = nn->boot_time;
1422 static u32 i; 1816 clp->cl_clientid.cl_id = nn->clientid_counter++;
1423 1817 gen_confirm(clp, nn);
1424 verf[0] = (__be32)get_seconds();
1425 verf[1] = (__be32)i++;
1426 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1427} 1818}
1428 1819
1429static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t) 1820static struct nfs4_stid *
1821find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1430{ 1822{
1431 struct nfs4_stid *ret; 1823 struct nfs4_stid *ret;
1432 1824
@@ -1436,16 +1828,21 @@ static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1436 return ret; 1828 return ret;
1437} 1829}
1438 1830
1439static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) 1831static struct nfs4_stid *
1832find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1440{ 1833{
1441 struct nfs4_stid *s; 1834 struct nfs4_stid *s;
1442 1835
1443 s = find_stateid(cl, t); 1836 spin_lock(&cl->cl_lock);
1444 if (!s) 1837 s = find_stateid_locked(cl, t);
1445 return NULL; 1838 if (s != NULL) {
1446 if (typemask & s->sc_type) 1839 if (typemask & s->sc_type)
1447 return s; 1840 atomic_inc(&s->sc_count);
1448 return NULL; 1841 else
1842 s = NULL;
1843 }
1844 spin_unlock(&cl->cl_lock);
1845 return s;
1449} 1846}
1450 1847
1451static struct nfs4_client *create_client(struct xdr_netobj name, 1848static struct nfs4_client *create_client(struct xdr_netobj name,
@@ -1455,7 +1852,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
1455 struct sockaddr *sa = svc_addr(rqstp); 1852 struct sockaddr *sa = svc_addr(rqstp);
1456 int ret; 1853 int ret;
1457 struct net *net = SVC_NET(rqstp); 1854 struct net *net = SVC_NET(rqstp);
1458 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1459 1855
1460 clp = alloc_client(name); 1856 clp = alloc_client(name);
1461 if (clp == NULL) 1857 if (clp == NULL)
@@ -1463,17 +1859,14 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
1463 1859
1464 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 1860 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1465 if (ret) { 1861 if (ret) {
1466 spin_lock(&nn->client_lock);
1467 free_client(clp); 1862 free_client(clp);
1468 spin_unlock(&nn->client_lock);
1469 return NULL; 1863 return NULL;
1470 } 1864 }
1471 nfsd4_init_callback(&clp->cl_cb_null); 1865 INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
1472 clp->cl_time = get_seconds(); 1866 clp->cl_time = get_seconds();
1473 clear_bit(0, &clp->cl_cb_slot_busy); 1867 clear_bit(0, &clp->cl_cb_slot_busy);
1474 copy_verf(clp, verf); 1868 copy_verf(clp, verf);
1475 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1869 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1476 gen_confirm(clp);
1477 clp->cl_cb_session = NULL; 1870 clp->cl_cb_session = NULL;
1478 clp->net = net; 1871 clp->net = net;
1479 return clp; 1872 return clp;
@@ -1525,11 +1918,13 @@ add_to_unconfirmed(struct nfs4_client *clp)
1525 unsigned int idhashval; 1918 unsigned int idhashval;
1526 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1919 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1527 1920
1921 lockdep_assert_held(&nn->client_lock);
1922
1528 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 1923 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1529 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 1924 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1530 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1925 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1531 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 1926 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1532 renew_client(clp); 1927 renew_client_locked(clp);
1533} 1928}
1534 1929
1535static void 1930static void
@@ -1538,12 +1933,14 @@ move_to_confirmed(struct nfs4_client *clp)
1538 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 1933 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1539 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 1934 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1540 1935
1936 lockdep_assert_held(&nn->client_lock);
1937
1541 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp); 1938 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1542 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 1939 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1543 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 1940 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1544 add_clp_to_name_tree(clp, &nn->conf_name_tree); 1941 add_clp_to_name_tree(clp, &nn->conf_name_tree);
1545 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 1942 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1546 renew_client(clp); 1943 renew_client_locked(clp);
1547} 1944}
1548 1945
1549static struct nfs4_client * 1946static struct nfs4_client *
@@ -1556,7 +1953,7 @@ find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1556 if (same_clid(&clp->cl_clientid, clid)) { 1953 if (same_clid(&clp->cl_clientid, clid)) {
1557 if ((bool)clp->cl_minorversion != sessions) 1954 if ((bool)clp->cl_minorversion != sessions)
1558 return NULL; 1955 return NULL;
1559 renew_client(clp); 1956 renew_client_locked(clp);
1560 return clp; 1957 return clp;
1561 } 1958 }
1562 } 1959 }
@@ -1568,6 +1965,7 @@ find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1568{ 1965{
1569 struct list_head *tbl = nn->conf_id_hashtbl; 1966 struct list_head *tbl = nn->conf_id_hashtbl;
1570 1967
1968 lockdep_assert_held(&nn->client_lock);
1571 return find_client_in_id_table(tbl, clid, sessions); 1969 return find_client_in_id_table(tbl, clid, sessions);
1572} 1970}
1573 1971
@@ -1576,6 +1974,7 @@ find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1576{ 1974{
1577 struct list_head *tbl = nn->unconf_id_hashtbl; 1975 struct list_head *tbl = nn->unconf_id_hashtbl;
1578 1976
1977 lockdep_assert_held(&nn->client_lock);
1579 return find_client_in_id_table(tbl, clid, sessions); 1978 return find_client_in_id_table(tbl, clid, sessions);
1580} 1979}
1581 1980
@@ -1587,12 +1986,14 @@ static bool clp_used_exchangeid(struct nfs4_client *clp)
1587static struct nfs4_client * 1986static struct nfs4_client *
1588find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 1987find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1589{ 1988{
1989 lockdep_assert_held(&nn->client_lock);
1590 return find_clp_in_name_tree(name, &nn->conf_name_tree); 1990 return find_clp_in_name_tree(name, &nn->conf_name_tree);
1591} 1991}
1592 1992
1593static struct nfs4_client * 1993static struct nfs4_client *
1594find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 1994find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1595{ 1995{
1996 lockdep_assert_held(&nn->client_lock);
1596 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 1997 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1597} 1998}
1598 1999
@@ -1642,7 +2043,7 @@ out_err:
1642/* 2043/*
1643 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 2044 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
1644 */ 2045 */
1645void 2046static void
1646nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 2047nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1647{ 2048{
1648 struct xdr_buf *buf = resp->xdr.buf; 2049 struct xdr_buf *buf = resp->xdr.buf;
@@ -1758,7 +2159,8 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1758 struct nfsd4_compound_state *cstate, 2159 struct nfsd4_compound_state *cstate,
1759 struct nfsd4_exchange_id *exid) 2160 struct nfsd4_exchange_id *exid)
1760{ 2161{
1761 struct nfs4_client *unconf, *conf, *new; 2162 struct nfs4_client *conf, *new;
2163 struct nfs4_client *unconf = NULL;
1762 __be32 status; 2164 __be32 status;
1763 char addr_str[INET6_ADDRSTRLEN]; 2165 char addr_str[INET6_ADDRSTRLEN];
1764 nfs4_verifier verf = exid->verifier; 2166 nfs4_verifier verf = exid->verifier;
@@ -1787,8 +2189,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1787 return nfserr_encr_alg_unsupp; 2189 return nfserr_encr_alg_unsupp;
1788 } 2190 }
1789 2191
2192 new = create_client(exid->clname, rqstp, &verf);
2193 if (new == NULL)
2194 return nfserr_jukebox;
2195
1790 /* Cases below refer to rfc 5661 section 18.35.4: */ 2196 /* Cases below refer to rfc 5661 section 18.35.4: */
1791 nfs4_lock_state(); 2197 spin_lock(&nn->client_lock);
1792 conf = find_confirmed_client_by_name(&exid->clname, nn); 2198 conf = find_confirmed_client_by_name(&exid->clname, nn);
1793 if (conf) { 2199 if (conf) {
1794 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 2200 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
@@ -1813,7 +2219,6 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1813 } 2219 }
1814 /* case 6 */ 2220 /* case 6 */
1815 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 2221 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1816 new = conf;
1817 goto out_copy; 2222 goto out_copy;
1818 } 2223 }
1819 if (!creds_match) { /* case 3 */ 2224 if (!creds_match) { /* case 3 */
@@ -1821,15 +2226,14 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1821 status = nfserr_clid_inuse; 2226 status = nfserr_clid_inuse;
1822 goto out; 2227 goto out;
1823 } 2228 }
1824 expire_client(conf);
1825 goto out_new; 2229 goto out_new;
1826 } 2230 }
1827 if (verfs_match) { /* case 2 */ 2231 if (verfs_match) { /* case 2 */
1828 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 2232 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1829 new = conf;
1830 goto out_copy; 2233 goto out_copy;
1831 } 2234 }
1832 /* case 5, client reboot */ 2235 /* case 5, client reboot */
2236 conf = NULL;
1833 goto out_new; 2237 goto out_new;
1834 } 2238 }
1835 2239
@@ -1840,33 +2244,38 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1840 2244
1841 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 2245 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
1842 if (unconf) /* case 4, possible retry or client restart */ 2246 if (unconf) /* case 4, possible retry or client restart */
1843 expire_client(unconf); 2247 unhash_client_locked(unconf);
1844 2248
1845 /* case 1 (normal case) */ 2249 /* case 1 (normal case) */
1846out_new: 2250out_new:
1847 new = create_client(exid->clname, rqstp, &verf); 2251 if (conf) {
1848 if (new == NULL) { 2252 status = mark_client_expired_locked(conf);
1849 status = nfserr_jukebox; 2253 if (status)
1850 goto out; 2254 goto out;
1851 } 2255 }
1852 new->cl_minorversion = cstate->minorversion; 2256 new->cl_minorversion = cstate->minorversion;
1853 new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED); 2257 new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
1854 2258
1855 gen_clid(new, nn); 2259 gen_clid(new, nn);
1856 add_to_unconfirmed(new); 2260 add_to_unconfirmed(new);
2261 swap(new, conf);
1857out_copy: 2262out_copy:
1858 exid->clientid.cl_boot = new->cl_clientid.cl_boot; 2263 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
1859 exid->clientid.cl_id = new->cl_clientid.cl_id; 2264 exid->clientid.cl_id = conf->cl_clientid.cl_id;
1860 2265
1861 exid->seqid = new->cl_cs_slot.sl_seqid + 1; 2266 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
1862 nfsd4_set_ex_flags(new, exid); 2267 nfsd4_set_ex_flags(conf, exid);
1863 2268
1864 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 2269 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1865 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags); 2270 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
1866 status = nfs_ok; 2271 status = nfs_ok;
1867 2272
1868out: 2273out:
1869 nfs4_unlock_state(); 2274 spin_unlock(&nn->client_lock);
2275 if (new)
2276 expire_client(new);
2277 if (unconf)
2278 expire_client(unconf);
1870 return status; 2279 return status;
1871} 2280}
1872 2281
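A pattern worth noting in the rewritten nfsd4_exchange_id() (and in nfsd4_setclientid() further down): the candidate client is allocated before nn->client_lock is taken, the hash lookups and the add_to_unconfirmed()/swap() happen under the spinlock, and whichever client lost the race is expired only after the unlock, presumably because expiring a client is too heavyweight to do under a spinlock. A compressed userspace sketch of that shape, with a mutex standing in for the spinlock and made-up names throughout:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct client {
        char name[32];
        struct client *next;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
static struct client *clients;

static struct client *find_locked(const char *name)
{
        struct client *c;

        for (c = clients; c; c = c->next)
                if (strcmp(c->name, name) == 0)
                        return c;
        return NULL;
}

/* Allocate the candidate before taking the lock, install it only if no
 * existing entry wins, and free the loser after the lock is dropped so
 * nothing slow happens while the lock is held. */
static struct client *find_or_create(const char *name)
{
        struct client *new = calloc(1, sizeof(*new));
        struct client *found;

        if (!new)
                return NULL;
        snprintf(new->name, sizeof(new->name), "%s", name);

        pthread_mutex_lock(&client_lock);
        found = find_locked(name);
        if (!found) {
                new->next = clients;
                clients = new;
                found = new;
                new = NULL;             /* consumed: skip the free below */
        }
        pthread_mutex_unlock(&client_lock);

        free(new);                      /* harmless when new == NULL */
        return found;
}

int main(void)
{
        printf("%p\n", (void *)find_or_create("client-a"));
        printf("%p\n", (void *)find_or_create("client-a"));    /* same pointer */
        return 0;
}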
@@ -2010,6 +2419,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
2010{ 2419{
2011 struct sockaddr *sa = svc_addr(rqstp); 2420 struct sockaddr *sa = svc_addr(rqstp);
2012 struct nfs4_client *conf, *unconf; 2421 struct nfs4_client *conf, *unconf;
2422 struct nfs4_client *old = NULL;
2013 struct nfsd4_session *new; 2423 struct nfsd4_session *new;
2014 struct nfsd4_conn *conn; 2424 struct nfsd4_conn *conn;
2015 struct nfsd4_clid_slot *cs_slot = NULL; 2425 struct nfsd4_clid_slot *cs_slot = NULL;
@@ -2035,7 +2445,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
2035 if (!conn) 2445 if (!conn)
2036 goto out_free_session; 2446 goto out_free_session;
2037 2447
2038 nfs4_lock_state(); 2448 spin_lock(&nn->client_lock);
2039 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 2449 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2040 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 2450 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2041 WARN_ON_ONCE(conf && unconf); 2451 WARN_ON_ONCE(conf && unconf);
@@ -2054,7 +2464,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
2054 goto out_free_conn; 2464 goto out_free_conn;
2055 } 2465 }
2056 } else if (unconf) { 2466 } else if (unconf) {
2057 struct nfs4_client *old;
2058 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 2467 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2059 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 2468 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2060 status = nfserr_clid_inuse; 2469 status = nfserr_clid_inuse;
@@ -2072,10 +2481,11 @@ nfsd4_create_session(struct svc_rqst *rqstp,
2072 } 2481 }
2073 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 2482 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2074 if (old) { 2483 if (old) {
2075 status = mark_client_expired(old); 2484 status = mark_client_expired_locked(old);
2076 if (status) 2485 if (status) {
2486 old = NULL;
2077 goto out_free_conn; 2487 goto out_free_conn;
2078 expire_client(old); 2488 }
2079 } 2489 }
2080 move_to_confirmed(unconf); 2490 move_to_confirmed(unconf);
2081 conf = unconf; 2491 conf = unconf;
@@ -2091,20 +2501,27 @@ nfsd4_create_session(struct svc_rqst *rqstp,
2091 cr_ses->flags &= ~SESSION4_RDMA; 2501 cr_ses->flags &= ~SESSION4_RDMA;
2092 2502
2093 init_session(rqstp, new, conf, cr_ses); 2503 init_session(rqstp, new, conf, cr_ses);
2094 nfsd4_init_conn(rqstp, conn, new); 2504 nfsd4_get_session_locked(new);
2095 2505
2096 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 2506 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2097 NFS4_MAX_SESSIONID_LEN); 2507 NFS4_MAX_SESSIONID_LEN);
2098 cs_slot->sl_seqid++; 2508 cs_slot->sl_seqid++;
2099 cr_ses->seqid = cs_slot->sl_seqid; 2509 cr_ses->seqid = cs_slot->sl_seqid;
2100 2510
2101 /* cache solo and embedded create sessions under the state lock */ 2511 /* cache solo and embedded create sessions under the client_lock */
2102 nfsd4_cache_create_session(cr_ses, cs_slot, status); 2512 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2103 nfs4_unlock_state(); 2513 spin_unlock(&nn->client_lock);
2514 /* init connection and backchannel */
2515 nfsd4_init_conn(rqstp, conn, new);
2516 nfsd4_put_session(new);
2517 if (old)
2518 expire_client(old);
2104 return status; 2519 return status;
2105out_free_conn: 2520out_free_conn:
2106 nfs4_unlock_state(); 2521 spin_unlock(&nn->client_lock);
2107 free_conn(conn); 2522 free_conn(conn);
2523 if (old)
2524 expire_client(old);
2108out_free_session: 2525out_free_session:
2109 __free_session(new); 2526 __free_session(new);
2110out_release_drc_mem: 2527out_release_drc_mem:
@@ -2152,17 +2569,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2152 __be32 status; 2569 __be32 status;
2153 struct nfsd4_conn *conn; 2570 struct nfsd4_conn *conn;
2154 struct nfsd4_session *session; 2571 struct nfsd4_session *session;
2155 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2572 struct net *net = SVC_NET(rqstp);
2573 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2156 2574
2157 if (!nfsd4_last_compound_op(rqstp)) 2575 if (!nfsd4_last_compound_op(rqstp))
2158 return nfserr_not_only_op; 2576 return nfserr_not_only_op;
2159 nfs4_lock_state();
2160 spin_lock(&nn->client_lock); 2577 spin_lock(&nn->client_lock);
2161 session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp)); 2578 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2162 spin_unlock(&nn->client_lock); 2579 spin_unlock(&nn->client_lock);
2163 status = nfserr_badsession;
2164 if (!session) 2580 if (!session)
2165 goto out; 2581 goto out_no_session;
2166 status = nfserr_wrong_cred; 2582 status = nfserr_wrong_cred;
2167 if (!mach_creds_match(session->se_client, rqstp)) 2583 if (!mach_creds_match(session->se_client, rqstp))
2168 goto out; 2584 goto out;
@@ -2176,7 +2592,8 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2176 nfsd4_init_conn(rqstp, conn, session); 2592 nfsd4_init_conn(rqstp, conn, session);
2177 status = nfs_ok; 2593 status = nfs_ok;
2178out: 2594out:
2179 nfs4_unlock_state(); 2595 nfsd4_put_session(session);
2596out_no_session:
2180 return status; 2597 return status;
2181} 2598}
2182 2599
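find_in_sessionid_hashtbl() now takes a status argument and returns a referenced session, or NULL with the reason in *status, which is why the explicit nfserr_badsession assignments disappear from its callers and why the session is released with nfsd4_put_session() at the end. A tiny userspace sketch of just the status-out-parameter half of that calling convention, with invented names and plain ints in place of __be32 codes:

#include <stdio.h>

#define ERR_BADSESSION   10052          /* illustrative error codes only */
#define ERR_DEADSESSION  10078

struct session {
        int id;
        int dead;
};

static struct session sessions[] = { { 1, 0 }, { 2, 1 } };

/* Return the matching session, or NULL with the reason in *status, so the
 * caller never has to guess why the lookup failed. */
static struct session *find_session(int id, int *status)
{
        unsigned int i;

        *status = ERR_BADSESSION;
        for (i = 0; i < sizeof(sessions) / sizeof(sessions[0]); i++) {
                if (sessions[i].id != id)
                        continue;
                if (sessions[i].dead) {
                        *status = ERR_DEADSESSION;
                        return NULL;
                }
                *status = 0;
                return &sessions[i];
        }
        return NULL;
}

int main(void)
{
        int status;
        struct session *s = find_session(2, &status);

        printf("session=%p status=%d\n", (void *)s, status);
        return 0;
}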
@@ -2195,9 +2612,9 @@ nfsd4_destroy_session(struct svc_rqst *r,
2195 struct nfsd4_session *ses; 2612 struct nfsd4_session *ses;
2196 __be32 status; 2613 __be32 status;
2197 int ref_held_by_me = 0; 2614 int ref_held_by_me = 0;
2198 struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id); 2615 struct net *net = SVC_NET(r);
2616 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2199 2617
2200 nfs4_lock_state();
2201 status = nfserr_not_only_op; 2618 status = nfserr_not_only_op;
2202 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { 2619 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2203 if (!nfsd4_last_compound_op(r)) 2620 if (!nfsd4_last_compound_op(r))
@@ -2206,14 +2623,12 @@ nfsd4_destroy_session(struct svc_rqst *r,
2206 } 2623 }
2207 dump_sessionid(__func__, &sessionid->sessionid); 2624 dump_sessionid(__func__, &sessionid->sessionid);
2208 spin_lock(&nn->client_lock); 2625 spin_lock(&nn->client_lock);
2209 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r)); 2626 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2210 status = nfserr_badsession;
2211 if (!ses) 2627 if (!ses)
2212 goto out_client_lock; 2628 goto out_client_lock;
2213 status = nfserr_wrong_cred; 2629 status = nfserr_wrong_cred;
2214 if (!mach_creds_match(ses->se_client, r)) 2630 if (!mach_creds_match(ses->se_client, r))
2215 goto out_client_lock; 2631 goto out_put_session;
2216 nfsd4_get_session_locked(ses);
2217 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 2632 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2218 if (status) 2633 if (status)
2219 goto out_put_session; 2634 goto out_put_session;
@@ -2225,11 +2640,10 @@ nfsd4_destroy_session(struct svc_rqst *r,
2225 spin_lock(&nn->client_lock); 2640 spin_lock(&nn->client_lock);
2226 status = nfs_ok; 2641 status = nfs_ok;
2227out_put_session: 2642out_put_session:
2228 nfsd4_put_session(ses); 2643 nfsd4_put_session_locked(ses);
2229out_client_lock: 2644out_client_lock:
2230 spin_unlock(&nn->client_lock); 2645 spin_unlock(&nn->client_lock);
2231out: 2646out:
2232 nfs4_unlock_state();
2233 return status; 2647 return status;
2234} 2648}
2235 2649
@@ -2300,7 +2714,8 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2300 struct nfsd4_conn *conn; 2714 struct nfsd4_conn *conn;
2301 __be32 status; 2715 __be32 status;
2302 int buflen; 2716 int buflen;
2303 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2717 struct net *net = SVC_NET(rqstp);
2718 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2304 2719
2305 if (resp->opcnt != 1) 2720 if (resp->opcnt != 1)
2306 return nfserr_sequence_pos; 2721 return nfserr_sequence_pos;
@@ -2314,17 +2729,10 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2314 return nfserr_jukebox; 2729 return nfserr_jukebox;
2315 2730
2316 spin_lock(&nn->client_lock); 2731 spin_lock(&nn->client_lock);
2317 status = nfserr_badsession; 2732 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2318 session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
2319 if (!session) 2733 if (!session)
2320 goto out_no_session; 2734 goto out_no_session;
2321 clp = session->se_client; 2735 clp = session->se_client;
2322 status = get_client_locked(clp);
2323 if (status)
2324 goto out_no_session;
2325 status = nfsd4_get_session_locked(session);
2326 if (status)
2327 goto out_put_client;
2328 2736
2329 status = nfserr_too_many_ops; 2737 status = nfserr_too_many_ops;
2330 if (nfsd4_session_too_many_ops(rqstp, session)) 2738 if (nfsd4_session_too_many_ops(rqstp, session))
@@ -2354,6 +2762,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2354 goto out_put_session; 2762 goto out_put_session;
2355 cstate->slot = slot; 2763 cstate->slot = slot;
2356 cstate->session = session; 2764 cstate->session = session;
2765 cstate->clp = clp;
2357 /* Return the cached reply status and set cstate->status 2766 /* Return the cached reply status and set cstate->status
2358 * for nfsd4_proc_compound processing */ 2767 * for nfsd4_proc_compound processing */
2359 status = nfsd4_replay_cache_entry(resp, seq); 2768 status = nfsd4_replay_cache_entry(resp, seq);
@@ -2388,6 +2797,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
2388 2797
2389 cstate->slot = slot; 2798 cstate->slot = slot;
2390 cstate->session = session; 2799 cstate->session = session;
2800 cstate->clp = clp;
2391 2801
2392out: 2802out:
2393 switch (clp->cl_cb_state) { 2803 switch (clp->cl_cb_state) {
@@ -2408,31 +2818,48 @@ out_no_session:
2408 spin_unlock(&nn->client_lock); 2818 spin_unlock(&nn->client_lock);
2409 return status; 2819 return status;
2410out_put_session: 2820out_put_session:
2411 nfsd4_put_session(session); 2821 nfsd4_put_session_locked(session);
2412out_put_client:
2413 put_client_renew_locked(clp);
2414 goto out_no_session; 2822 goto out_no_session;
2415} 2823}
2416 2824
2825void
2826nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2827{
2828 struct nfsd4_compound_state *cs = &resp->cstate;
2829
2830 if (nfsd4_has_session(cs)) {
2831 if (cs->status != nfserr_replay_cache) {
2832 nfsd4_store_cache_entry(resp);
2833 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2834 }
2835 /* Drop session reference that was taken in nfsd4_sequence() */
2836 nfsd4_put_session(cs->session);
2837 } else if (cs->clp)
2838 put_client_renew(cs->clp);
2839}
2840
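The new nfsd4_sequence_done() above is the other half of the reference handling in nfsd4_sequence(): the session (and, for v4.0, the client) stays pinned while the compound runs and is released once the reply has been cached. A small userspace sketch of pairing a "begin request" pin with an "end request" release through a per-request context (all names invented):

#include <stdio.h>

struct session {
        int refcount;
};

struct compound_state {
        struct session *session;        /* pinned for the life of the request */
};

static void begin_request(struct compound_state *cs, struct session *se)
{
        se->refcount++;                 /* taken in the SEQUENCE-like step */
        cs->session = se;
}

static void end_request(struct compound_state *cs)
{
        if (cs->session) {
                cs->session->refcount--;        /* dropped once the reply is done */
                cs->session = NULL;
        }
}

int main(void)
{
        struct session se = { .refcount = 1 };
        struct compound_state cs = { 0 };

        begin_request(&cs, &se);
        printf("during request: %d\n", se.refcount);    /* 2 */
        end_request(&cs);
        printf("after request:  %d\n", se.refcount);    /* 1 */
        return 0;
}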
2417__be32 2841__be32
2418nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) 2842nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2419{ 2843{
2420 struct nfs4_client *conf, *unconf, *clp; 2844 struct nfs4_client *conf, *unconf;
2845 struct nfs4_client *clp = NULL;
2421 __be32 status = 0; 2846 __be32 status = 0;
2422 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2847 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2423 2848
2424 nfs4_lock_state(); 2849 spin_lock(&nn->client_lock);
2425 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 2850 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2426 conf = find_confirmed_client(&dc->clientid, true, nn); 2851 conf = find_confirmed_client(&dc->clientid, true, nn);
2427 WARN_ON_ONCE(conf && unconf); 2852 WARN_ON_ONCE(conf && unconf);
2428 2853
2429 if (conf) { 2854 if (conf) {
2430 clp = conf;
2431
2432 if (client_has_state(conf)) { 2855 if (client_has_state(conf)) {
2433 status = nfserr_clientid_busy; 2856 status = nfserr_clientid_busy;
2434 goto out; 2857 goto out;
2435 } 2858 }
2859 status = mark_client_expired_locked(conf);
2860 if (status)
2861 goto out;
2862 clp = conf;
2436 } else if (unconf) 2863 } else if (unconf)
2437 clp = unconf; 2864 clp = unconf;
2438 else { 2865 else {
@@ -2440,12 +2867,15 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2440 goto out; 2867 goto out;
2441 } 2868 }
2442 if (!mach_creds_match(clp, rqstp)) { 2869 if (!mach_creds_match(clp, rqstp)) {
2870 clp = NULL;
2443 status = nfserr_wrong_cred; 2871 status = nfserr_wrong_cred;
2444 goto out; 2872 goto out;
2445 } 2873 }
2446 expire_client(clp); 2874 unhash_client_locked(clp);
2447out: 2875out:
2448 nfs4_unlock_state(); 2876 spin_unlock(&nn->client_lock);
2877 if (clp)
2878 expire_client(clp);
2449 return status; 2879 return status;
2450} 2880}
2451 2881
@@ -2464,7 +2894,6 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2464 return nfs_ok; 2894 return nfs_ok;
2465 } 2895 }
2466 2896
2467 nfs4_lock_state();
2468 status = nfserr_complete_already; 2897 status = nfserr_complete_already;
2469 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, 2898 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2470 &cstate->session->se_client->cl_flags)) 2899 &cstate->session->se_client->cl_flags))
@@ -2484,7 +2913,6 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2484 status = nfs_ok; 2913 status = nfs_ok;
2485 nfsd4_client_record_create(cstate->session->se_client); 2914 nfsd4_client_record_create(cstate->session->se_client);
2486out: 2915out:
2487 nfs4_unlock_state();
2488 return status; 2916 return status;
2489} 2917}
2490 2918
@@ -2494,12 +2922,16 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2494{ 2922{
2495 struct xdr_netobj clname = setclid->se_name; 2923 struct xdr_netobj clname = setclid->se_name;
2496 nfs4_verifier clverifier = setclid->se_verf; 2924 nfs4_verifier clverifier = setclid->se_verf;
2497 struct nfs4_client *conf, *unconf, *new; 2925 struct nfs4_client *conf, *new;
2926 struct nfs4_client *unconf = NULL;
2498 __be32 status; 2927 __be32 status;
2499 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2928 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2500 2929
2930 new = create_client(clname, rqstp, &clverifier);
2931 if (new == NULL)
2932 return nfserr_jukebox;
2501 /* Cases below refer to rfc 3530 section 14.2.33: */ 2933 /* Cases below refer to rfc 3530 section 14.2.33: */
2502 nfs4_lock_state(); 2934 spin_lock(&nn->client_lock);
2503 conf = find_confirmed_client_by_name(&clname, nn); 2935 conf = find_confirmed_client_by_name(&clname, nn);
2504 if (conf) { 2936 if (conf) {
2505 /* case 0: */ 2937 /* case 0: */
@@ -2517,11 +2949,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2517 } 2949 }
2518 unconf = find_unconfirmed_client_by_name(&clname, nn); 2950 unconf = find_unconfirmed_client_by_name(&clname, nn);
2519 if (unconf) 2951 if (unconf)
2520 expire_client(unconf); 2952 unhash_client_locked(unconf);
2521 status = nfserr_jukebox;
2522 new = create_client(clname, rqstp, &clverifier);
2523 if (new == NULL)
2524 goto out;
2525 if (conf && same_verf(&conf->cl_verifier, &clverifier)) 2953 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2526 /* case 1: probable callback update */ 2954 /* case 1: probable callback update */
2527 copy_clid(new, conf); 2955 copy_clid(new, conf);
@@ -2533,9 +2961,14 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2533 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 2961 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2534 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 2962 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2535 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 2963 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2964 new = NULL;
2536 status = nfs_ok; 2965 status = nfs_ok;
2537out: 2966out:
2538 nfs4_unlock_state(); 2967 spin_unlock(&nn->client_lock);
2968 if (new)
2969 free_client(new);
2970 if (unconf)
2971 expire_client(unconf);
2539 return status; 2972 return status;
2540} 2973}
2541 2974
@@ -2546,6 +2979,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2546 struct nfsd4_setclientid_confirm *setclientid_confirm) 2979 struct nfsd4_setclientid_confirm *setclientid_confirm)
2547{ 2980{
2548 struct nfs4_client *conf, *unconf; 2981 struct nfs4_client *conf, *unconf;
2982 struct nfs4_client *old = NULL;
2549 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 2983 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2550 clientid_t * clid = &setclientid_confirm->sc_clientid; 2984 clientid_t * clid = &setclientid_confirm->sc_clientid;
2551 __be32 status; 2985 __be32 status;
@@ -2553,8 +2987,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2553 2987
2554 if (STALE_CLIENTID(clid, nn)) 2988 if (STALE_CLIENTID(clid, nn))
2555 return nfserr_stale_clientid; 2989 return nfserr_stale_clientid;
2556 nfs4_lock_state();
2557 2990
2991 spin_lock(&nn->client_lock);
2558 conf = find_confirmed_client(clid, false, nn); 2992 conf = find_confirmed_client(clid, false, nn);
2559 unconf = find_unconfirmed_client(clid, false, nn); 2993 unconf = find_unconfirmed_client(clid, false, nn);
2560 /* 2994 /*
@@ -2578,22 +3012,30 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2578 } 3012 }
2579 status = nfs_ok; 3013 status = nfs_ok;
2580 if (conf) { /* case 1: callback update */ 3014 if (conf) { /* case 1: callback update */
3015 old = unconf;
3016 unhash_client_locked(old);
2581 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 3017 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2582 nfsd4_probe_callback(conf);
2583 expire_client(unconf);
2584 } else { /* case 3: normal case; new or rebooted client */ 3018 } else { /* case 3: normal case; new or rebooted client */
2585 conf = find_confirmed_client_by_name(&unconf->cl_name, nn); 3019 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2586 if (conf) { 3020 if (old) {
2587 status = mark_client_expired(conf); 3021 status = mark_client_expired_locked(old);
2588 if (status) 3022 if (status) {
3023 old = NULL;
2589 goto out; 3024 goto out;
2590 expire_client(conf); 3025 }
2591 } 3026 }
2592 move_to_confirmed(unconf); 3027 move_to_confirmed(unconf);
2593 nfsd4_probe_callback(unconf); 3028 conf = unconf;
2594 } 3029 }
3030 get_client_locked(conf);
3031 spin_unlock(&nn->client_lock);
3032 nfsd4_probe_callback(conf);
3033 spin_lock(&nn->client_lock);
3034 put_client_renew_locked(conf);
2595out: 3035out:
2596 nfs4_unlock_state(); 3036 spin_unlock(&nn->client_lock);
3037 if (old)
3038 expire_client(old);
2597 return status; 3039 return status;
2598} 3040}
2599 3041
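The confirm path above ends with get_client_locked(), an unlock, nfsd4_probe_callback(), a re-lock and put_client_renew_locked(): the client is pinned so the spinlock can be dropped around the callback probe. A stripped-down userspace illustration of that "take a temporary reference, drop the lock, do the slow work, retake the lock, drop the reference" dance (the mutex and all names are stand-ins, not the nfsd API):

#include <pthread.h>
#include <stdio.h>

struct client {
        int refcount;                   /* protected by client_lock */
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for work that may block and so must not run under the lock. */
static void probe_callback(struct client *clp)
{
        printf("probing callback channel for %p\n", (void *)clp);
}

static void confirm_client(struct client *clp)
{
        pthread_mutex_lock(&client_lock);
        /* ... hash-table manipulation under the lock ... */
        clp->refcount++;                /* pin before dropping the lock */
        pthread_mutex_unlock(&client_lock);

        probe_callback(clp);            /* safe: clp cannot go away */

        pthread_mutex_lock(&client_lock);
        clp->refcount--;                /* release the temporary pin */
        pthread_mutex_unlock(&client_lock);
}

int main(void)
{
        struct client clp = { .refcount = 1 };

        confirm_client(&clp);
        printf("refcount back to %d\n", clp.refcount);
        return 0;
}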
@@ -2603,21 +3045,23 @@ static struct nfs4_file *nfsd4_alloc_file(void)
2603} 3045}
2604 3046
2605/* OPEN Share state helper functions */ 3047/* OPEN Share state helper functions */
2606static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) 3048static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
2607{ 3049{
2608 unsigned int hashval = file_hashval(ino); 3050 unsigned int hashval = file_hashval(fh);
3051
3052 lockdep_assert_held(&state_lock);
2609 3053
2610 atomic_set(&fp->fi_ref, 1); 3054 atomic_set(&fp->fi_ref, 1);
3055 spin_lock_init(&fp->fi_lock);
2611 INIT_LIST_HEAD(&fp->fi_stateids); 3056 INIT_LIST_HEAD(&fp->fi_stateids);
2612 INIT_LIST_HEAD(&fp->fi_delegations); 3057 INIT_LIST_HEAD(&fp->fi_delegations);
2613 fp->fi_inode = igrab(ino); 3058 fh_copy_shallow(&fp->fi_fhandle, fh);
2614 fp->fi_had_conflict = false; 3059 fp->fi_had_conflict = false;
2615 fp->fi_lease = NULL; 3060 fp->fi_lease = NULL;
3061 fp->fi_share_deny = 0;
2616 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 3062 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2617 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 3063 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2618 spin_lock(&state_lock);
2619 hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]); 3064 hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
2620 spin_unlock(&state_lock);
2621} 3065}
2622 3066
2623void 3067void
@@ -2673,6 +3117,28 @@ static void init_nfs4_replay(struct nfs4_replay *rp)
2673 rp->rp_status = nfserr_serverfault; 3117 rp->rp_status = nfserr_serverfault;
2674 rp->rp_buflen = 0; 3118 rp->rp_buflen = 0;
2675 rp->rp_buf = rp->rp_ibuf; 3119 rp->rp_buf = rp->rp_ibuf;
3120 mutex_init(&rp->rp_mutex);
3121}
3122
3123static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3124 struct nfs4_stateowner *so)
3125{
3126 if (!nfsd4_has_session(cstate)) {
3127 mutex_lock(&so->so_replay.rp_mutex);
3128 cstate->replay_owner = so;
3129 atomic_inc(&so->so_count);
3130 }
3131}
3132
3133void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3134{
3135 struct nfs4_stateowner *so = cstate->replay_owner;
3136
3137 if (so != NULL) {
3138 cstate->replay_owner = NULL;
3139 mutex_unlock(&so->so_replay.rp_mutex);
3140 nfs4_put_stateowner(so);
3141 }
2676} 3142}
2677 3143
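The rp_mutex added to the replay structure above is taken by nfsd4_cstate_assign_replay() for non-session (v4.0) compounds and released by nfsd4_cstate_clear_replay() when the compound is done, so seqid-based operations for one stateowner are serialized without the old global mutex; the real code also takes a reference on the stateowner, which the following sketch omits. A minimal userspace version of holding a per-owner mutex across request processing (names invented; the "has session" flag mirrors the v4.1 skip):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stateowner {
        pthread_mutex_t replay_mutex;
        int seqid;
};

struct request_state {
        bool has_session;               /* v4.1+: replay handled by the session */
        struct stateowner *replay_owner;
};

static void assign_replay(struct request_state *rs, struct stateowner *so)
{
        if (!rs->has_session) {
                pthread_mutex_lock(&so->replay_mutex);  /* held across the op */
                rs->replay_owner = so;
        }
}

static void clear_replay(struct request_state *rs)
{
        if (rs->replay_owner) {
                pthread_mutex_unlock(&rs->replay_owner->replay_mutex);
                rs->replay_owner = NULL;
        }
}

int main(void)
{
        static struct stateowner so = { .replay_mutex = PTHREAD_MUTEX_INITIALIZER };
        struct request_state rs = { .has_session = false, .replay_owner = NULL };

        assign_replay(&rs, &so);
        so.seqid++;                     /* the seqid-protected work */
        clear_replay(&rs);
        printf("seqid now %d\n", so.seqid);
        return 0;
}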
2678static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 3144static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
@@ -2693,111 +3159,172 @@ static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj
2693 INIT_LIST_HEAD(&sop->so_stateids); 3159 INIT_LIST_HEAD(&sop->so_stateids);
2694 sop->so_client = clp; 3160 sop->so_client = clp;
2695 init_nfs4_replay(&sop->so_replay); 3161 init_nfs4_replay(&sop->so_replay);
3162 atomic_set(&sop->so_count, 1);
2696 return sop; 3163 return sop;
2697} 3164}
2698 3165
2699static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 3166static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2700{ 3167{
2701 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3168 lockdep_assert_held(&clp->cl_lock);
2702 3169
2703 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); 3170 list_add(&oo->oo_owner.so_strhash,
3171 &clp->cl_ownerstr_hashtbl[strhashval]);
2704 list_add(&oo->oo_perclient, &clp->cl_openowners); 3172 list_add(&oo->oo_perclient, &clp->cl_openowners);
2705} 3173}
2706 3174
3175static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3176{
3177 unhash_openowner_locked(openowner(so));
3178}
3179
3180static void nfs4_free_openowner(struct nfs4_stateowner *so)
3181{
3182 struct nfs4_openowner *oo = openowner(so);
3183
3184 kmem_cache_free(openowner_slab, oo);
3185}
3186
3187static const struct nfs4_stateowner_operations openowner_ops = {
3188 .so_unhash = nfs4_unhash_openowner,
3189 .so_free = nfs4_free_openowner,
3190};
3191
2707static struct nfs4_openowner * 3192static struct nfs4_openowner *
2708alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) { 3193alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
2709 struct nfs4_openowner *oo; 3194 struct nfsd4_compound_state *cstate)
3195{
3196 struct nfs4_client *clp = cstate->clp;
3197 struct nfs4_openowner *oo, *ret;
2710 3198
2711 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); 3199 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2712 if (!oo) 3200 if (!oo)
2713 return NULL; 3201 return NULL;
3202 oo->oo_owner.so_ops = &openowner_ops;
2714 oo->oo_owner.so_is_open_owner = 1; 3203 oo->oo_owner.so_is_open_owner = 1;
2715 oo->oo_owner.so_seqid = open->op_seqid; 3204 oo->oo_owner.so_seqid = open->op_seqid;
2716 oo->oo_flags = NFS4_OO_NEW; 3205 oo->oo_flags = 0;
3206 if (nfsd4_has_session(cstate))
3207 oo->oo_flags |= NFS4_OO_CONFIRMED;
2717 oo->oo_time = 0; 3208 oo->oo_time = 0;
2718 oo->oo_last_closed_stid = NULL; 3209 oo->oo_last_closed_stid = NULL;
2719 INIT_LIST_HEAD(&oo->oo_close_lru); 3210 INIT_LIST_HEAD(&oo->oo_close_lru);
2720 hash_openowner(oo, clp, strhashval); 3211 spin_lock(&clp->cl_lock);
3212 ret = find_openstateowner_str_locked(strhashval, open, clp);
3213 if (ret == NULL) {
3214 hash_openowner(oo, clp, strhashval);
3215 ret = oo;
3216 } else
3217 nfs4_free_openowner(&oo->oo_owner);
3218 spin_unlock(&clp->cl_lock);
2721 return oo; 3219 return oo;
2722} 3220}
2723 3221
2724static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) { 3222static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2725 struct nfs4_openowner *oo = open->op_openowner; 3223 struct nfs4_openowner *oo = open->op_openowner;
2726 3224
3225 atomic_inc(&stp->st_stid.sc_count);
2727 stp->st_stid.sc_type = NFS4_OPEN_STID; 3226 stp->st_stid.sc_type = NFS4_OPEN_STID;
2728 INIT_LIST_HEAD(&stp->st_lockowners); 3227 INIT_LIST_HEAD(&stp->st_locks);
2729 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2730 list_add(&stp->st_perfile, &fp->fi_stateids);
2731 stp->st_stateowner = &oo->oo_owner; 3228 stp->st_stateowner = &oo->oo_owner;
3229 atomic_inc(&stp->st_stateowner->so_count);
2732 get_nfs4_file(fp); 3230 get_nfs4_file(fp);
2733 stp->st_file = fp; 3231 stp->st_stid.sc_file = fp;
2734 stp->st_access_bmap = 0; 3232 stp->st_access_bmap = 0;
2735 stp->st_deny_bmap = 0; 3233 stp->st_deny_bmap = 0;
2736 set_access(open->op_share_access, stp);
2737 set_deny(open->op_share_deny, stp);
2738 stp->st_openstp = NULL; 3234 stp->st_openstp = NULL;
3235 spin_lock(&oo->oo_owner.so_client->cl_lock);
3236 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3237 spin_lock(&fp->fi_lock);
3238 list_add(&stp->st_perfile, &fp->fi_stateids);
3239 spin_unlock(&fp->fi_lock);
3240 spin_unlock(&oo->oo_owner.so_client->cl_lock);
2739} 3241}
2740 3242
3243/*
3244 * In the 4.0 case we need to keep the owners around a little while to handle
3245 * CLOSE replay. We still do need to release any file access that is held by
3246 * them before returning however.
3247 */
2741static void 3248static void
2742move_to_close_lru(struct nfs4_openowner *oo, struct net *net) 3249move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
2743{ 3250{
2744 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3251 struct nfs4_ol_stateid *last;
3252 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3253 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3254 nfsd_net_id);
2745 3255
2746 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 3256 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2747 3257
3258 /*
3259 * We know that we hold one reference via nfsd4_close, and another
3260 * "persistent" reference for the client. If the refcount is higher
3261 * than 2, then there are still calls in progress that are using this
3262 * stateid. We can't put the sc_file reference until they are finished.
3263 * Wait for the refcount to drop to 2. Since it has been unhashed,
3264 * there should be no danger of the refcount going back up again at
3265 * this point.
3266 */
3267 wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);
3268
3269 release_all_access(s);
3270 if (s->st_stid.sc_file) {
3271 put_nfs4_file(s->st_stid.sc_file);
3272 s->st_stid.sc_file = NULL;
3273 }
3274
3275 spin_lock(&nn->client_lock);
3276 last = oo->oo_last_closed_stid;
3277 oo->oo_last_closed_stid = s;
2748 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 3278 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
2749 oo->oo_time = get_seconds(); 3279 oo->oo_time = get_seconds();
3280 spin_unlock(&nn->client_lock);
3281 if (last)
3282 nfs4_put_stid(&last->st_stid);
2750} 3283}
2751 3284
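move_to_close_lru() above blocks on close_wq until sc_count drops to 2, that is, until every in-flight user of the open stateid other than the CLOSE caller and the client's persistent reference has finished. The same "sleep until the refcount reaches a known floor" idea in plain pthreads, as a hedged sketch with a condition variable in place of the kernel waitqueue (all names invented):

#include <pthread.h>
#include <stdio.h>

struct stateid {
        int refcount;                   /* protected by lock */
        pthread_mutex_t lock;
        pthread_cond_t close_wq;        /* signalled whenever refcount drops */
};

static void put_ref(struct stateid *s)
{
        pthread_mutex_lock(&s->lock);
        s->refcount--;
        pthread_cond_broadcast(&s->close_wq);
        pthread_mutex_unlock(&s->lock);
}

/* Wait until only the two expected references remain before tearing the
 * stateid's resources down, mirroring wait_event(close_wq, sc_count == 2). */
static void wait_for_close(struct stateid *s)
{
        pthread_mutex_lock(&s->lock);
        while (s->refcount != 2)
                pthread_cond_wait(&s->close_wq, &s->lock);
        pthread_mutex_unlock(&s->lock);
}

static void *other_user(void *arg)
{
        put_ref(arg);                   /* an in-flight operation finishing */
        return NULL;
}

int main(void)
{
        struct stateid s = { .refcount = 3 };
        pthread_t t;

        pthread_mutex_init(&s.lock, NULL);
        pthread_cond_init(&s.close_wq, NULL);

        pthread_create(&t, NULL, other_user, &s);
        wait_for_close(&s);             /* returns once refcount == 2 */
        pthread_join(t, NULL);
        printf("refcount is %d, safe to release file access\n", s.refcount);
        return 0;
}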
2752static int 3285/* search file_hashtbl[] for file */
2753same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner, 3286static struct nfs4_file *
2754 clientid_t *clid) 3287find_file_locked(struct knfsd_fh *fh)
2755{ 3288{
2756 return (sop->so_owner.len == owner->len) && 3289 unsigned int hashval = file_hashval(fh);
2757 0 == memcmp(sop->so_owner.data, owner->data, owner->len) && 3290 struct nfs4_file *fp;
2758 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2759}
2760 3291
2761static struct nfs4_openowner * 3292 lockdep_assert_held(&state_lock);
2762find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
2763 bool sessions, struct nfsd_net *nn)
2764{
2765 struct nfs4_stateowner *so;
2766 struct nfs4_openowner *oo;
2767 struct nfs4_client *clp;
2768 3293
2769 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) { 3294 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2770 if (!so->so_is_open_owner) 3295 if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
2771 continue; 3296 get_nfs4_file(fp);
2772 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) { 3297 return fp;
2773 oo = openowner(so);
2774 clp = oo->oo_owner.so_client;
2775 if ((bool)clp->cl_minorversion != sessions)
2776 return NULL;
2777 renew_client(oo->oo_owner.so_client);
2778 return oo;
2779 } 3298 }
2780 } 3299 }
2781 return NULL; 3300 return NULL;
2782} 3301}
2783 3302
2784/* search file_hashtbl[] for file */
2785static struct nfs4_file * 3303static struct nfs4_file *
2786find_file(struct inode *ino) 3304find_file(struct knfsd_fh *fh)
2787{ 3305{
2788 unsigned int hashval = file_hashval(ino);
2789 struct nfs4_file *fp; 3306 struct nfs4_file *fp;
2790 3307
2791 spin_lock(&state_lock); 3308 spin_lock(&state_lock);
2792 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { 3309 fp = find_file_locked(fh);
2793 if (fp->fi_inode == ino) { 3310 spin_unlock(&state_lock);
2794 get_nfs4_file(fp); 3311 return fp;
2795 spin_unlock(&state_lock); 3312}
2796 return fp; 3313
2797 } 3314static struct nfs4_file *
3315find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3316{
3317 struct nfs4_file *fp;
3318
3319 spin_lock(&state_lock);
3320 fp = find_file_locked(fh);
3321 if (fp == NULL) {
3322 nfsd4_init_file(new, fh);
3323 fp = new;
2798 } 3324 }
2799 spin_unlock(&state_lock); 3325 spin_unlock(&state_lock);
2800 return NULL; 3326
3327 return fp;
2801} 3328}
2802 3329
2803/* 3330/*
@@ -2807,47 +3334,53 @@ find_file(struct inode *ino)
2807static __be32 3334static __be32
2808nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 3335nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2809{ 3336{
2810 struct inode *ino = current_fh->fh_dentry->d_inode;
2811 struct nfs4_file *fp; 3337 struct nfs4_file *fp;
2812 struct nfs4_ol_stateid *stp; 3338 __be32 ret = nfs_ok;
2813 __be32 ret;
2814 3339
2815 fp = find_file(ino); 3340 fp = find_file(&current_fh->fh_handle);
2816 if (!fp) 3341 if (!fp)
2817 return nfs_ok; 3342 return ret;
2818 ret = nfserr_locked; 3343 /* Check for conflicting share reservations */
2819 /* Search for conflicting share reservations */ 3344 spin_lock(&fp->fi_lock);
2820 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 3345 if (fp->fi_share_deny & deny_type)
2821 if (test_deny(deny_type, stp) || 3346 ret = nfserr_locked;
2822 test_deny(NFS4_SHARE_DENY_BOTH, stp)) 3347 spin_unlock(&fp->fi_lock);
2823 goto out;
2824 }
2825 ret = nfs_ok;
2826out:
2827 put_nfs4_file(fp); 3348 put_nfs4_file(fp);
2828 return ret; 3349 return ret;
2829} 3350}
2830 3351
2831static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 3352void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
2832{ 3353{
2833 struct nfs4_client *clp = dp->dl_stid.sc_client; 3354 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
2834 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3355 nfsd_net_id);
2835 3356
2836 lockdep_assert_held(&state_lock); 3357 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
2837 /* We're assuming the state code never drops its reference 3358
3359 /*
3360 * We can't do this in nfsd_break_deleg_cb because it is
3361 * already holding inode->i_lock.
3362 *
3363 * If the dl_time != 0, then we know that it has already been
3364 * queued for a lease break. Don't queue it again.
3365 */
3366 spin_lock(&state_lock);
3367 if (dp->dl_time == 0) {
3368 dp->dl_time = get_seconds();
3369 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3370 }
3371 spin_unlock(&state_lock);
3372}
3373
3374static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3375{
3376 /*
3377 * We're assuming the state code never drops its reference
2838 * without first removing the lease. Since we're in this lease 3378 * without first removing the lease. Since we're in this lease
2839 * callback (and since the lease code is serialized by the kernel 3379 * callback (and since the lease code is serialized by the kernel
2840 * lock) we know the server hasn't removed the lease yet, we know 3380 * lock) we know the server hasn't removed the lease yet, we know
2841 * it's safe to take a reference: */ 3381 * it's safe to take a reference.
2842 atomic_inc(&dp->dl_count); 3382 */
2843 3383 atomic_inc(&dp->dl_stid.sc_count);
2844 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
2845
2846 /* Only place dl_time is set; protected by i_lock: */
2847 dp->dl_time = get_seconds();
2848
2849 block_delegations(&dp->dl_fh);
2850
2851 nfsd4_cb_recall(dp); 3384 nfsd4_cb_recall(dp);
2852} 3385}
2853 3386
@@ -2872,11 +3405,20 @@ static void nfsd_break_deleg_cb(struct file_lock *fl)
2872 */ 3405 */
2873 fl->fl_break_time = 0; 3406 fl->fl_break_time = 0;
2874 3407
2875 spin_lock(&state_lock); 3408 spin_lock(&fp->fi_lock);
2876 fp->fi_had_conflict = true; 3409 fp->fi_had_conflict = true;
2877 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) 3410 /*
2878 nfsd_break_one_deleg(dp); 3411 * If there are no delegations on the list, then we can't count on this
2879 spin_unlock(&state_lock); 3412 * lease ever being cleaned up. Set the fl_break_time to jiffies so that
3413 * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
3414 * true should keep any new delegations from being hashed.
3415 */
3416 if (list_empty(&fp->fi_delegations))
3417 fl->fl_break_time = jiffies;
3418 else
3419 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
3420 nfsd_break_one_deleg(dp);
3421 spin_unlock(&fp->fi_lock);
2880} 3422}
2881 3423
2882static 3424static
@@ -2904,6 +3446,42 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
2904 return nfserr_bad_seqid; 3446 return nfserr_bad_seqid;
2905} 3447}
2906 3448
3449static __be32 lookup_clientid(clientid_t *clid,
3450 struct nfsd4_compound_state *cstate,
3451 struct nfsd_net *nn)
3452{
3453 struct nfs4_client *found;
3454
3455 if (cstate->clp) {
3456 found = cstate->clp;
3457 if (!same_clid(&found->cl_clientid, clid))
3458 return nfserr_stale_clientid;
3459 return nfs_ok;
3460 }
3461
3462 if (STALE_CLIENTID(clid, nn))
3463 return nfserr_stale_clientid;
3464
3465 /*
3466 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3467 * cached already then we know this is for v4.0 and "sessions"
3468 * will be false.
3469 */
3470 WARN_ON_ONCE(cstate->session);
3471 spin_lock(&nn->client_lock);
3472 found = find_confirmed_client(clid, false, nn);
3473 if (!found) {
3474 spin_unlock(&nn->client_lock);
3475 return nfserr_expired;
3476 }
3477 atomic_inc(&found->cl_refcount);
3478 spin_unlock(&nn->client_lock);
3479
3480 /* Cache the nfs4_client in cstate! */
3481 cstate->clp = found;
3482 return nfs_ok;
3483}
3484
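lookup_clientid() above resolves the clientid at most once per compound: if cstate->clp is already set it only sanity-checks the id, otherwise it does the locked table lookup, takes a reference and caches the result for later operations in the same compound. A short userspace sketch of that memoized lookup (hypothetical names; error codes are plain ints):

#include <stdio.h>

#define ERR_STALE   1
#define ERR_EXPIRED 2

struct client {
        int clientid;
        int refcount;
};

struct compound_state {
        struct client *clp;             /* cached for the rest of the compound */
};

static struct client known_client = { .clientid = 42, .refcount = 1 };

static int lookup_clientid(int clientid, struct compound_state *cstate)
{
        if (cstate->clp)                /* already resolved earlier in the compound */
                return cstate->clp->clientid == clientid ? 0 : ERR_STALE;

        if (known_client.clientid != clientid)
                return ERR_EXPIRED;

        known_client.refcount++;        /* pin the client for the caller */
        cstate->clp = &known_client;
        return 0;
}

int main(void)
{
        struct compound_state cstate = { 0 };

        printf("first lookup:  %d\n", lookup_clientid(42, &cstate));
        printf("second lookup: %d\n", lookup_clientid(42, &cstate));
        printf("refcount:      %d\n", known_client.refcount);   /* bumped once */
        return 0;
}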
2907__be32 3485__be32
2908nfsd4_process_open1(struct nfsd4_compound_state *cstate, 3486nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2909 struct nfsd4_open *open, struct nfsd_net *nn) 3487 struct nfsd4_open *open, struct nfsd_net *nn)
@@ -2924,19 +3502,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2924 if (open->op_file == NULL) 3502 if (open->op_file == NULL)
2925 return nfserr_jukebox; 3503 return nfserr_jukebox;
2926 3504
2927 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner); 3505 status = lookup_clientid(clientid, cstate, nn);
2928 oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn); 3506 if (status)
3507 return status;
3508 clp = cstate->clp;
3509
3510 strhashval = ownerstr_hashval(&open->op_owner);
3511 oo = find_openstateowner_str(strhashval, open, clp);
2929 open->op_openowner = oo; 3512 open->op_openowner = oo;
2930 if (!oo) { 3513 if (!oo) {
2931 clp = find_confirmed_client(clientid, cstate->minorversion,
2932 nn);
2933 if (clp == NULL)
2934 return nfserr_expired;
2935 goto new_owner; 3514 goto new_owner;
2936 } 3515 }
2937 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 3516 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2938 /* Replace unconfirmed owners without checking for replay. */ 3517 /* Replace unconfirmed owners without checking for replay. */
2939 clp = oo->oo_owner.so_client;
2940 release_openowner(oo); 3518 release_openowner(oo);
2941 open->op_openowner = NULL; 3519 open->op_openowner = NULL;
2942 goto new_owner; 3520 goto new_owner;
@@ -2944,15 +3522,14 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2944 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 3522 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2945 if (status) 3523 if (status)
2946 return status; 3524 return status;
2947 clp = oo->oo_owner.so_client;
2948 goto alloc_stateid; 3525 goto alloc_stateid;
2949new_owner: 3526new_owner:
2950 oo = alloc_init_open_stateowner(strhashval, clp, open); 3527 oo = alloc_init_open_stateowner(strhashval, open, cstate);
2951 if (oo == NULL) 3528 if (oo == NULL)
2952 return nfserr_jukebox; 3529 return nfserr_jukebox;
2953 open->op_openowner = oo; 3530 open->op_openowner = oo;
2954alloc_stateid: 3531alloc_stateid:
2955 open->op_stp = nfs4_alloc_stateid(clp); 3532 open->op_stp = nfs4_alloc_open_stateid(clp);
2956 if (!open->op_stp) 3533 if (!open->op_stp)
2957 return nfserr_jukebox; 3534 return nfserr_jukebox;
2958 return nfs_ok; 3535 return nfs_ok;
@@ -2994,14 +3571,18 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
2994{ 3571{
2995 int flags; 3572 int flags;
2996 __be32 status = nfserr_bad_stateid; 3573 __be32 status = nfserr_bad_stateid;
3574 struct nfs4_delegation *deleg;
2997 3575
2998 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid); 3576 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
2999 if (*dp == NULL) 3577 if (deleg == NULL)
3000 goto out; 3578 goto out;
3001 flags = share_access_to_flags(open->op_share_access); 3579 flags = share_access_to_flags(open->op_share_access);
3002 status = nfs4_check_delegmode(*dp, flags); 3580 status = nfs4_check_delegmode(deleg, flags);
3003 if (status) 3581 if (status) {
3004 *dp = NULL; 3582 nfs4_put_stid(&deleg->dl_stid);
3583 goto out;
3584 }
3585 *dp = deleg;
3005out: 3586out:
3006 if (!nfsd4_is_deleg_cur(open)) 3587 if (!nfsd4_is_deleg_cur(open))
3007 return nfs_ok; 3588 return nfs_ok;
@@ -3011,24 +3592,25 @@ out:
3011 return nfs_ok; 3592 return nfs_ok;
3012} 3593}
3013 3594
3014static __be32 3595static struct nfs4_ol_stateid *
3015nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp) 3596nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3016{ 3597{
3017 struct nfs4_ol_stateid *local; 3598 struct nfs4_ol_stateid *local, *ret = NULL;
3018 struct nfs4_openowner *oo = open->op_openowner; 3599 struct nfs4_openowner *oo = open->op_openowner;
3019 3600
3601 spin_lock(&fp->fi_lock);
3020 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 3602 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3021 /* ignore lock owners */ 3603 /* ignore lock owners */
3022 if (local->st_stateowner->so_is_open_owner == 0) 3604 if (local->st_stateowner->so_is_open_owner == 0)
3023 continue; 3605 continue;
3024 /* remember if we have seen this open owner */ 3606 if (local->st_stateowner == &oo->oo_owner) {
3025 if (local->st_stateowner == &oo->oo_owner) 3607 ret = local;
3026 *stpp = local; 3608 atomic_inc(&ret->st_stid.sc_count);
3027 /* check for conflicting share reservations */ 3609 break;
3028 if (!test_share(local, open)) 3610 }
3029 return nfserr_share_denied;
3030 } 3611 }
3031 return nfs_ok; 3612 spin_unlock(&fp->fi_lock);
3613 return ret;
3032} 3614}
3033 3615
3034static inline int nfs4_access_to_access(u32 nfs4_access) 3616static inline int nfs4_access_to_access(u32 nfs4_access)
@@ -3042,24 +3624,6 @@ static inline int nfs4_access_to_access(u32 nfs4_access)
3042 return flags; 3624 return flags;
3043} 3625}
3044 3626
3045static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3046 struct svc_fh *cur_fh, struct nfsd4_open *open)
3047{
3048 __be32 status;
3049 int oflag = nfs4_access_to_omode(open->op_share_access);
3050 int access = nfs4_access_to_access(open->op_share_access);
3051
3052 if (!fp->fi_fds[oflag]) {
3053 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
3054 &fp->fi_fds[oflag]);
3055 if (status)
3056 return status;
3057 }
3058 nfs4_file_get_access(fp, oflag);
3059
3060 return nfs_ok;
3061}
3062
3063static inline __be32 3627static inline __be32
3064nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 3628nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3065 struct nfsd4_open *open) 3629 struct nfsd4_open *open)
@@ -3075,34 +3639,99 @@ nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3075 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0); 3639 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3076} 3640}
3077 3641
3078static __be32 3642static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3079nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) 3643 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
3644 struct nfsd4_open *open)
3080{ 3645{
3081 u32 op_share_access = open->op_share_access; 3646 struct file *filp = NULL;
3082 bool new_access;
3083 __be32 status; 3647 __be32 status;
3648 int oflag = nfs4_access_to_omode(open->op_share_access);
3649 int access = nfs4_access_to_access(open->op_share_access);
3650 unsigned char old_access_bmap, old_deny_bmap;
3084 3651
3085 new_access = !test_access(op_share_access, stp); 3652 spin_lock(&fp->fi_lock);
3086 if (new_access) { 3653
3087 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); 3654 /*
3088 if (status) 3655 * Are we trying to set a deny mode that would conflict with
3089 return status; 3656 * current access?
3657 */
3658 status = nfs4_file_check_deny(fp, open->op_share_deny);
3659 if (status != nfs_ok) {
3660 spin_unlock(&fp->fi_lock);
3661 goto out;
3090 } 3662 }
3091 status = nfsd4_truncate(rqstp, cur_fh, open); 3663
3092 if (status) { 3664 /* set access to the file */
3093 if (new_access) { 3665 status = nfs4_file_get_access(fp, open->op_share_access);
3094 int oflag = nfs4_access_to_omode(op_share_access); 3666 if (status != nfs_ok) {
3095 nfs4_file_put_access(fp, oflag); 3667 spin_unlock(&fp->fi_lock);
3096 } 3668 goto out;
3097 return status;
3098 } 3669 }
3099 /* remember the open */ 3670
3100 set_access(op_share_access, stp); 3671 /* Set access bits in stateid */
3672 old_access_bmap = stp->st_access_bmap;
3673 set_access(open->op_share_access, stp);
3674
3675 /* Set new deny mask */
3676 old_deny_bmap = stp->st_deny_bmap;
3101 set_deny(open->op_share_deny, stp); 3677 set_deny(open->op_share_deny, stp);
3678 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3102 3679
3103 return nfs_ok; 3680 if (!fp->fi_fds[oflag]) {
3681 spin_unlock(&fp->fi_lock);
3682 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3683 if (status)
3684 goto out_put_access;
3685 spin_lock(&fp->fi_lock);
3686 if (!fp->fi_fds[oflag]) {
3687 fp->fi_fds[oflag] = filp;
3688 filp = NULL;
3689 }
3690 }
3691 spin_unlock(&fp->fi_lock);
3692 if (filp)
3693 fput(filp);
3694
3695 status = nfsd4_truncate(rqstp, cur_fh, open);
3696 if (status)
3697 goto out_put_access;
3698out:
3699 return status;
3700out_put_access:
3701 stp->st_access_bmap = old_access_bmap;
3702 nfs4_file_put_access(fp, open->op_share_access);
3703 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
3704 goto out;
3104} 3705}
3105 3706
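nfs4_get_vfs_file() above is careful to drop fi_lock around nfsd_open(), since opening a file can sleep, and then re-checks fp->fi_fds[oflag] after retaking the lock: if another opener installed a struct file in the meantime, the extra one is simply fput() after the unlock. A boiled-down userspace version of that double-checked install, with a mutex in place of the spinlock and fopen()/fclose() standing in for nfsd_open()/fput() (names invented, example path arbitrary):

#include <pthread.h>
#include <stdio.h>

struct nfs_file {
        pthread_mutex_t lock;
        FILE *filp;                     /* cached open file, NULL until first use */
};

static int get_vfs_file(struct nfs_file *fp, const char *path)
{
        FILE *filp = NULL;

        pthread_mutex_lock(&fp->lock);
        if (!fp->filp) {
                /* Opening can block, so do it outside the lock. */
                pthread_mutex_unlock(&fp->lock);
                filp = fopen(path, "r");
                if (!filp)
                        return -1;
                pthread_mutex_lock(&fp->lock);
                if (!fp->filp) {        /* still unset: install ours */
                        fp->filp = filp;
                        filp = NULL;
                }
        }
        pthread_mutex_unlock(&fp->lock);

        if (filp)                       /* somebody else won the race */
                fclose(filp);
        return 0;
}

int main(void)
{
        static struct nfs_file fp = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("first:  %d\n", get_vfs_file(&fp, "/etc/hostname"));
        printf("second: %d\n", get_vfs_file(&fp, "/etc/hostname"));
        if (fp.filp)
                fclose(fp.filp);
        return 0;
}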
3707static __be32
3708nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
3709{
3710 __be32 status;
3711 unsigned char old_deny_bmap;
3712
3713 if (!test_access(open->op_share_access, stp))
3714 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
3715
3716 /* test and set deny mode */
3717 spin_lock(&fp->fi_lock);
3718 status = nfs4_file_check_deny(fp, open->op_share_deny);
3719 if (status == nfs_ok) {
3720 old_deny_bmap = stp->st_deny_bmap;
3721 set_deny(open->op_share_deny, stp);
3722 fp->fi_share_deny |=
3723 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
3724 }
3725 spin_unlock(&fp->fi_lock);
3726
3727 if (status != nfs_ok)
3728 return status;
3729
3730 status = nfsd4_truncate(rqstp, cur_fh, open);
3731 if (status != nfs_ok)
3732 reset_union_bmap_deny(old_deny_bmap, stp);
3733 return status;
3734}
3106 3735
3107static void 3736static void
3108nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session) 3737nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
@@ -3123,7 +3752,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3123 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 3752 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3124} 3753}
3125 3754
3126static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) 3755static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
3127{ 3756{
3128 struct file_lock *fl; 3757 struct file_lock *fl;
3129 3758
@@ -3135,53 +3764,101 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int f
3135 fl->fl_flags = FL_DELEG; 3764 fl->fl_flags = FL_DELEG;
3136 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; 3765 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
3137 fl->fl_end = OFFSET_MAX; 3766 fl->fl_end = OFFSET_MAX;
3138 fl->fl_owner = (fl_owner_t)(dp->dl_file); 3767 fl->fl_owner = (fl_owner_t)fp;
3139 fl->fl_pid = current->tgid; 3768 fl->fl_pid = current->tgid;
3140 return fl; 3769 return fl;
3141} 3770}
3142 3771
3143static int nfs4_setlease(struct nfs4_delegation *dp) 3772static int nfs4_setlease(struct nfs4_delegation *dp)
3144{ 3773{
3145 struct nfs4_file *fp = dp->dl_file; 3774 struct nfs4_file *fp = dp->dl_stid.sc_file;
3146 struct file_lock *fl; 3775 struct file_lock *fl;
3147 int status; 3776 struct file *filp;
3777 int status = 0;
3148 3778
3149 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ); 3779 fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
3150 if (!fl) 3780 if (!fl)
3151 return -ENOMEM; 3781 return -ENOMEM;
3152 fl->fl_file = find_readable_file(fp); 3782 filp = find_readable_file(fp);
3153 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); 3783 if (!filp) {
3154 if (status) 3784 /* We should always have a readable file here */
3155 goto out_free; 3785 WARN_ON_ONCE(1);
3786 return -EBADF;
3787 }
3788 fl->fl_file = filp;
3789 status = vfs_setlease(filp, fl->fl_type, &fl);
3790 if (status) {
3791 locks_free_lock(fl);
3792 goto out_fput;
3793 }
3794 spin_lock(&state_lock);
3795 spin_lock(&fp->fi_lock);
3796 /* Did the lease get broken before we took the lock? */
3797 status = -EAGAIN;
3798 if (fp->fi_had_conflict)
3799 goto out_unlock;
3800 /* Race breaker */
3801 if (fp->fi_lease) {
3802 status = 0;
3803 atomic_inc(&fp->fi_delegees);
3804 hash_delegation_locked(dp, fp);
3805 goto out_unlock;
3806 }
3156 fp->fi_lease = fl; 3807 fp->fi_lease = fl;
3157 fp->fi_deleg_file = get_file(fl->fl_file); 3808 fp->fi_deleg_file = filp;
3158 atomic_set(&fp->fi_delegees, 1); 3809 atomic_set(&fp->fi_delegees, 1);
3159 spin_lock(&state_lock);
3160 hash_delegation_locked(dp, fp); 3810 hash_delegation_locked(dp, fp);
3811 spin_unlock(&fp->fi_lock);
3161 spin_unlock(&state_lock); 3812 spin_unlock(&state_lock);
3162 return 0; 3813 return 0;
3163out_free: 3814out_unlock:
3164 locks_free_lock(fl); 3815 spin_unlock(&fp->fi_lock);
3816 spin_unlock(&state_lock);
3817out_fput:
3818 fput(filp);
3165 return status; 3819 return status;
3166} 3820}
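
nfs4_setlease() now performs the vfs_setlease() call without the state locks held and only afterwards takes state_lock and fi_lock to re-check fi_had_conflict and fi_lease; if another thread installed a lease in the meantime it simply shares that one and throws its own work away. The standalone sketch below shows the same race-breaker idiom with a pthread mutex and invented names (demo_state, demo_setlease); it is a pattern illustration, not the nfsd implementation.

/*
 * Sketch of the "race breaker" pattern: do the expensive setup unlocked,
 * then re-check under the lock and back out if someone else got there first.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_state {
    pthread_mutex_t lock;
    void *lease;                    /* set once, shared by later callers */
    int users;
};

static int demo_setlease(struct demo_state *st)
{
    void *mine = malloc(64);        /* expensive setup done unlocked */

    if (!mine)
        return -1;

    pthread_mutex_lock(&st->lock);
    if (st->lease) {
        /* Race breaker: another caller installed a lease first. */
        st->users++;
        pthread_mutex_unlock(&st->lock);
        free(mine);                 /* discard our redundant copy */
        return 0;
    }
    st->lease = mine;
    st->users = 1;
    pthread_mutex_unlock(&st->lock);
    return 0;
}

int main(void)
{
    struct demo_state st = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

    demo_setlease(&st);
    demo_setlease(&st);
    printf("users sharing one lease: %d\n", st.users);
    free(st.lease);
    return 0;
}
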
3167 3821
3168static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp) 3822static struct nfs4_delegation *
3823nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
3824 struct nfs4_file *fp)
3169{ 3825{
3826 int status;
3827 struct nfs4_delegation *dp;
3828
3170 if (fp->fi_had_conflict) 3829 if (fp->fi_had_conflict)
3171 return -EAGAIN; 3830 return ERR_PTR(-EAGAIN);
3831
3832 dp = alloc_init_deleg(clp, fh);
3833 if (!dp)
3834 return ERR_PTR(-ENOMEM);
3835
3172 get_nfs4_file(fp); 3836 get_nfs4_file(fp);
3173 dp->dl_file = fp;
3174 if (!fp->fi_lease)
3175 return nfs4_setlease(dp);
3176 spin_lock(&state_lock); 3837 spin_lock(&state_lock);
3838 spin_lock(&fp->fi_lock);
3839 dp->dl_stid.sc_file = fp;
3840 if (!fp->fi_lease) {
3841 spin_unlock(&fp->fi_lock);
3842 spin_unlock(&state_lock);
3843 status = nfs4_setlease(dp);
3844 goto out;
3845 }
3177 atomic_inc(&fp->fi_delegees); 3846 atomic_inc(&fp->fi_delegees);
3178 if (fp->fi_had_conflict) { 3847 if (fp->fi_had_conflict) {
3179 spin_unlock(&state_lock); 3848 status = -EAGAIN;
3180 return -EAGAIN; 3849 goto out_unlock;
3181 } 3850 }
3182 hash_delegation_locked(dp, fp); 3851 hash_delegation_locked(dp, fp);
3852 status = 0;
3853out_unlock:
3854 spin_unlock(&fp->fi_lock);
3183 spin_unlock(&state_lock); 3855 spin_unlock(&state_lock);
3184 return 0; 3856out:
3857 if (status) {
3858 nfs4_put_stid(&dp->dl_stid);
3859 return ERR_PTR(status);
3860 }
3861 return dp;
3185} 3862}
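
nfs4_set_delegation() now hands back the delegation itself and reports failure through ERR_PTR(), so one pointer-sized return value can carry either the object or a negative errno such as -EAGAIN or -ENOMEM, which the caller tests with IS_ERR(). The snippet below re-implements that encoding in plain userspace C purely to show how it works; it is a simplified stand-in for <linux/err.h>, and make_delegation() is an invented example function.

/*
 * Userspace re-implementation of the ERR_PTR convention, for illustration:
 * small negative errnos are encoded in the top MAX_ERRNO pointer values.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct delegation { int id; };

static struct delegation *make_delegation(int conflict)
{
    struct delegation *dp;

    if (conflict)
        return ERR_PTR(-EAGAIN);    /* the error travels in the pointer */
    dp = malloc(sizeof(*dp));
    if (!dp)
        return ERR_PTR(-ENOMEM);
    dp->id = 1;
    return dp;
}

int main(void)
{
    struct delegation *dp = make_delegation(1);

    if (IS_ERR(dp))
        printf("no delegation, error %ld\n", PTR_ERR(dp));
    else
        free(dp);
    return 0;
}
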
3186 3863
3187static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 3864static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
@@ -3212,11 +3889,12 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3212 * proper support for them. 3889 * proper support for them.
3213 */ 3890 */
3214static void 3891static void
3215nfs4_open_delegation(struct net *net, struct svc_fh *fh, 3892nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
3216 struct nfsd4_open *open, struct nfs4_ol_stateid *stp) 3893 struct nfs4_ol_stateid *stp)
3217{ 3894{
3218 struct nfs4_delegation *dp; 3895 struct nfs4_delegation *dp;
3219 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner); 3896 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
3897 struct nfs4_client *clp = stp->st_stid.sc_client;
3220 int cb_up; 3898 int cb_up;
3221 int status = 0; 3899 int status = 0;
3222 3900
@@ -3235,7 +3913,7 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3235 * Let's not give out any delegations till everyone's 3913 * Let's not give out any delegations till everyone's
3236 * had the chance to reclaim theirs.... 3914 * had the chance to reclaim theirs....
3237 */ 3915 */
3238 if (locks_in_grace(net)) 3916 if (locks_in_grace(clp->net))
3239 goto out_no_deleg; 3917 goto out_no_deleg;
3240 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 3918 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3241 goto out_no_deleg; 3919 goto out_no_deleg;
@@ -3254,21 +3932,17 @@ nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3254 default: 3932 default:
3255 goto out_no_deleg; 3933 goto out_no_deleg;
3256 } 3934 }
3257 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh); 3935 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
3258 if (dp == NULL) 3936 if (IS_ERR(dp))
3259 goto out_no_deleg; 3937 goto out_no_deleg;
3260 status = nfs4_set_delegation(dp, stp->st_file);
3261 if (status)
3262 goto out_free;
3263 3938
3264 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 3939 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3265 3940
3266 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n", 3941 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3267 STATEID_VAL(&dp->dl_stid.sc_stateid)); 3942 STATEID_VAL(&dp->dl_stid.sc_stateid));
3268 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; 3943 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3944 nfs4_put_stid(&dp->dl_stid);
3269 return; 3945 return;
3270out_free:
3271 destroy_delegation(dp);
3272out_no_deleg: 3946out_no_deleg:
3273 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; 3947 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
3274 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 3948 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
@@ -3301,16 +3975,12 @@ static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3301 */ 3975 */
3302} 3976}
3303 3977
3304/*
3305 * called with nfs4_lock_state() held.
3306 */
3307__be32 3978__be32
3308nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 3979nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3309{ 3980{
3310 struct nfsd4_compoundres *resp = rqstp->rq_resp; 3981 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3311 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 3982 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3312 struct nfs4_file *fp = NULL; 3983 struct nfs4_file *fp = NULL;
3313 struct inode *ino = current_fh->fh_dentry->d_inode;
3314 struct nfs4_ol_stateid *stp = NULL; 3984 struct nfs4_ol_stateid *stp = NULL;
3315 struct nfs4_delegation *dp = NULL; 3985 struct nfs4_delegation *dp = NULL;
3316 __be32 status; 3986 __be32 status;
@@ -3320,21 +3990,18 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3320 * and check for delegations in the process of being recalled. 3990 * and check for delegations in the process of being recalled.
3321 * If not found, create the nfs4_file struct 3991 * If not found, create the nfs4_file struct
3322 */ 3992 */
3323 fp = find_file(ino); 3993 fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
3324 if (fp) { 3994 if (fp != open->op_file) {
3325 if ((status = nfs4_check_open(fp, open, &stp)))
3326 goto out;
3327 status = nfs4_check_deleg(cl, open, &dp); 3995 status = nfs4_check_deleg(cl, open, &dp);
3328 if (status) 3996 if (status)
3329 goto out; 3997 goto out;
3998 stp = nfsd4_find_existing_open(fp, open);
3330 } else { 3999 } else {
4000 open->op_file = NULL;
3331 status = nfserr_bad_stateid; 4001 status = nfserr_bad_stateid;
3332 if (nfsd4_is_deleg_cur(open)) 4002 if (nfsd4_is_deleg_cur(open))
3333 goto out; 4003 goto out;
3334 status = nfserr_jukebox; 4004 status = nfserr_jukebox;
3335 fp = open->op_file;
3336 open->op_file = NULL;
3337 nfsd4_init_file(fp, ino);
3338 } 4005 }
3339 4006
3340 /* 4007 /*
@@ -3347,22 +4014,19 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3347 if (status) 4014 if (status)
3348 goto out; 4015 goto out;
3349 } else { 4016 } else {
3350 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3351 if (status)
3352 goto out;
3353 status = nfsd4_truncate(rqstp, current_fh, open);
3354 if (status)
3355 goto out;
3356 stp = open->op_stp; 4017 stp = open->op_stp;
3357 open->op_stp = NULL; 4018 open->op_stp = NULL;
3358 init_open_stateid(stp, fp, open); 4019 init_open_stateid(stp, fp, open);
4020 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4021 if (status) {
4022 release_open_stateid(stp);
4023 goto out;
4024 }
3359 } 4025 }
3360 update_stateid(&stp->st_stid.sc_stateid); 4026 update_stateid(&stp->st_stid.sc_stateid);
3361 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4027 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3362 4028
3363 if (nfsd4_has_session(&resp->cstate)) { 4029 if (nfsd4_has_session(&resp->cstate)) {
3364 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3365
3366 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { 4030 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3367 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; 4031 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3368 open->op_why_no_deleg = WND4_NOT_WANTED; 4032 open->op_why_no_deleg = WND4_NOT_WANTED;
@@ -3374,7 +4038,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3374 * Attempt to hand out a delegation. No error return, because the 4038 * Attempt to hand out a delegation. No error return, because the
3375 * OPEN succeeds even if we fail. 4039 * OPEN succeeds even if we fail.
3376 */ 4040 */
3377 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp); 4041 nfs4_open_delegation(current_fh, open, stp);
3378nodeleg: 4042nodeleg:
3379 status = nfs_ok; 4043 status = nfs_ok;
3380 4044
@@ -3397,41 +4061,27 @@ out:
3397 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) && 4061 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3398 !nfsd4_has_session(&resp->cstate)) 4062 !nfsd4_has_session(&resp->cstate))
3399 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 4063 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4064 if (dp)
4065 nfs4_put_stid(&dp->dl_stid);
4066 if (stp)
4067 nfs4_put_stid(&stp->st_stid);
3400 4068
3401 return status; 4069 return status;
3402} 4070}
3403 4071
3404void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status) 4072void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4073 struct nfsd4_open *open, __be32 status)
3405{ 4074{
3406 if (open->op_openowner) { 4075 if (open->op_openowner) {
3407 struct nfs4_openowner *oo = open->op_openowner; 4076 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
3408 4077
3409 if (!list_empty(&oo->oo_owner.so_stateids)) 4078 nfsd4_cstate_assign_replay(cstate, so);
3410 list_del_init(&oo->oo_close_lru); 4079 nfs4_put_stateowner(so);
3411 if (oo->oo_flags & NFS4_OO_NEW) {
3412 if (status) {
3413 release_openowner(oo);
3414 open->op_openowner = NULL;
3415 } else
3416 oo->oo_flags &= ~NFS4_OO_NEW;
3417 }
3418 } 4080 }
3419 if (open->op_file) 4081 if (open->op_file)
3420 nfsd4_free_file(open->op_file); 4082 nfsd4_free_file(open->op_file);
3421 if (open->op_stp) 4083 if (open->op_stp)
3422 free_generic_stateid(open->op_stp); 4084 nfs4_put_stid(&open->op_stp->st_stid);
3423}
3424
3425static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
3426{
3427 struct nfs4_client *found;
3428
3429 if (STALE_CLIENTID(clid, nn))
3430 return nfserr_stale_clientid;
3431 found = find_confirmed_client(clid, session, nn);
3432 if (clp)
3433 *clp = found;
3434 return found ? nfs_ok : nfserr_expired;
3435} 4085}
3436 4086
3437__be32 4087__be32
@@ -3442,19 +4092,18 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3442 __be32 status; 4092 __be32 status;
3443 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4093 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3444 4094
3445 nfs4_lock_state();
3446 dprintk("process_renew(%08x/%08x): starting\n", 4095 dprintk("process_renew(%08x/%08x): starting\n",
3447 clid->cl_boot, clid->cl_id); 4096 clid->cl_boot, clid->cl_id);
3448 status = lookup_clientid(clid, cstate->minorversion, nn, &clp); 4097 status = lookup_clientid(clid, cstate, nn);
3449 if (status) 4098 if (status)
3450 goto out; 4099 goto out;
4100 clp = cstate->clp;
3451 status = nfserr_cb_path_down; 4101 status = nfserr_cb_path_down;
3452 if (!list_empty(&clp->cl_delegations) 4102 if (!list_empty(&clp->cl_delegations)
3453 && clp->cl_cb_state != NFSD4_CB_UP) 4103 && clp->cl_cb_state != NFSD4_CB_UP)
3454 goto out; 4104 goto out;
3455 status = nfs_ok; 4105 status = nfs_ok;
3456out: 4106out:
3457 nfs4_unlock_state();
3458 return status; 4107 return status;
3459} 4108}
3460 4109
@@ -3483,12 +4132,11 @@ nfs4_laundromat(struct nfsd_net *nn)
3483 struct nfs4_client *clp; 4132 struct nfs4_client *clp;
3484 struct nfs4_openowner *oo; 4133 struct nfs4_openowner *oo;
3485 struct nfs4_delegation *dp; 4134 struct nfs4_delegation *dp;
4135 struct nfs4_ol_stateid *stp;
3486 struct list_head *pos, *next, reaplist; 4136 struct list_head *pos, *next, reaplist;
3487 time_t cutoff = get_seconds() - nn->nfsd4_lease; 4137 time_t cutoff = get_seconds() - nn->nfsd4_lease;
3488 time_t t, new_timeo = nn->nfsd4_lease; 4138 time_t t, new_timeo = nn->nfsd4_lease;
3489 4139
3490 nfs4_lock_state();
3491
3492 dprintk("NFSD: laundromat service - starting\n"); 4140 dprintk("NFSD: laundromat service - starting\n");
3493 nfsd4_end_grace(nn); 4141 nfsd4_end_grace(nn);
3494 INIT_LIST_HEAD(&reaplist); 4142 INIT_LIST_HEAD(&reaplist);
@@ -3505,13 +4153,14 @@ nfs4_laundromat(struct nfsd_net *nn)
3505 clp->cl_clientid.cl_id); 4153 clp->cl_clientid.cl_id);
3506 continue; 4154 continue;
3507 } 4155 }
3508 list_move(&clp->cl_lru, &reaplist); 4156 list_add(&clp->cl_lru, &reaplist);
3509 } 4157 }
3510 spin_unlock(&nn->client_lock); 4158 spin_unlock(&nn->client_lock);
3511 list_for_each_safe(pos, next, &reaplist) { 4159 list_for_each_safe(pos, next, &reaplist) {
3512 clp = list_entry(pos, struct nfs4_client, cl_lru); 4160 clp = list_entry(pos, struct nfs4_client, cl_lru);
3513 dprintk("NFSD: purging unused client (clientid %08x)\n", 4161 dprintk("NFSD: purging unused client (clientid %08x)\n",
3514 clp->cl_clientid.cl_id); 4162 clp->cl_clientid.cl_id);
4163 list_del_init(&clp->cl_lru);
3515 expire_client(clp); 4164 expire_client(clp);
3516 } 4165 }
3517 spin_lock(&state_lock); 4166 spin_lock(&state_lock);
@@ -3524,24 +4173,37 @@ nfs4_laundromat(struct nfsd_net *nn)
3524 new_timeo = min(new_timeo, t); 4173 new_timeo = min(new_timeo, t);
3525 break; 4174 break;
3526 } 4175 }
3527 list_move(&dp->dl_recall_lru, &reaplist); 4176 unhash_delegation_locked(dp);
4177 list_add(&dp->dl_recall_lru, &reaplist);
3528 } 4178 }
3529 spin_unlock(&state_lock); 4179 spin_unlock(&state_lock);
3530 list_for_each_safe(pos, next, &reaplist) { 4180 while (!list_empty(&reaplist)) {
3531 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 4181 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4182 dl_recall_lru);
4183 list_del_init(&dp->dl_recall_lru);
3532 revoke_delegation(dp); 4184 revoke_delegation(dp);
3533 } 4185 }
3534 list_for_each_safe(pos, next, &nn->close_lru) { 4186
3535 oo = container_of(pos, struct nfs4_openowner, oo_close_lru); 4187 spin_lock(&nn->client_lock);
3536 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) { 4188 while (!list_empty(&nn->close_lru)) {
4189 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4190 oo_close_lru);
4191 if (time_after((unsigned long)oo->oo_time,
4192 (unsigned long)cutoff)) {
3537 t = oo->oo_time - cutoff; 4193 t = oo->oo_time - cutoff;
3538 new_timeo = min(new_timeo, t); 4194 new_timeo = min(new_timeo, t);
3539 break; 4195 break;
3540 } 4196 }
3541 release_openowner(oo); 4197 list_del_init(&oo->oo_close_lru);
4198 stp = oo->oo_last_closed_stid;
4199 oo->oo_last_closed_stid = NULL;
4200 spin_unlock(&nn->client_lock);
4201 nfs4_put_stid(&stp->st_stid);
4202 spin_lock(&nn->client_lock);
3542 } 4203 }
4204 spin_unlock(&nn->client_lock);
4205
3543 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 4206 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
3544 nfs4_unlock_state();
3545 return new_timeo; 4207 return new_timeo;
3546} 4208}
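
The reworked laundromat consistently unhashes expired clients, delegations and openowners onto a private reaplist while the relevant lock is held and only performs the expensive teardown after dropping it. Below is a small self-contained sketch of that detach-then-reap pattern over a mutex-protected singly linked list; the structures and helpers are hypothetical and the driver in main() is single-threaded.

/*
 * Detach-then-reap sketch: expired entries are unlinked onto a private list
 * under the lock, then destroyed after the lock is released.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *next;
    long expiry;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *active;        /* protected by list_lock */

static void reap_expired(long now)
{
    struct entry *reaplist = NULL, *e, **pp;

    pthread_mutex_lock(&list_lock);
    pp = &active;
    while ((e = *pp) != NULL) {
        if (e->expiry > now) {
            pp = &e->next;
            continue;
        }
        *pp = e->next;              /* unhash under the lock... */
        e->next = reaplist;         /* ...and park it on a private list */
        reaplist = e;
    }
    pthread_mutex_unlock(&list_lock);

    while ((e = reaplist) != NULL) {    /* slow teardown happens unlocked */
        reaplist = e->next;
        printf("reaping entry with expiry %ld\n", e->expiry);
        free(e);
    }
}

int main(void)
{
    for (long t = 1; t <= 3; t++) {
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
            break;
        e->expiry = t;
        e->next = active;
        active = e;
    }
    reap_expired(2);
    return 0;
}
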
3547 4209
@@ -3564,7 +4226,7 @@ laundromat_main(struct work_struct *laundry)
3564 4226
3565static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 4227static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3566{ 4228{
3567 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode) 4229 if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
3568 return nfserr_bad_stateid; 4230 return nfserr_bad_stateid;
3569 return nfs_ok; 4231 return nfs_ok;
3570} 4232}
@@ -3666,10 +4328,10 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3666{ 4328{
3667 struct nfs4_stid *s; 4329 struct nfs4_stid *s;
3668 struct nfs4_ol_stateid *ols; 4330 struct nfs4_ol_stateid *ols;
3669 __be32 status; 4331 __be32 status = nfserr_bad_stateid;
3670 4332
3671 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4333 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3672 return nfserr_bad_stateid; 4334 return status;
3673 /* Client debugging aid. */ 4335 /* Client debugging aid. */
3674 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) { 4336 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
3675 char addr_str[INET6_ADDRSTRLEN]; 4337 char addr_str[INET6_ADDRSTRLEN];
@@ -3677,53 +4339,62 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3677 sizeof(addr_str)); 4339 sizeof(addr_str));
3678 pr_warn_ratelimited("NFSD: client %s testing state ID " 4340 pr_warn_ratelimited("NFSD: client %s testing state ID "
3679 "with incorrect client ID\n", addr_str); 4341 "with incorrect client ID\n", addr_str);
3680 return nfserr_bad_stateid; 4342 return status;
3681 } 4343 }
3682 s = find_stateid(cl, stateid); 4344 spin_lock(&cl->cl_lock);
4345 s = find_stateid_locked(cl, stateid);
3683 if (!s) 4346 if (!s)
3684 return nfserr_bad_stateid; 4347 goto out_unlock;
3685 status = check_stateid_generation(stateid, &s->sc_stateid, 1); 4348 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3686 if (status) 4349 if (status)
3687 return status; 4350 goto out_unlock;
3688 switch (s->sc_type) { 4351 switch (s->sc_type) {
3689 case NFS4_DELEG_STID: 4352 case NFS4_DELEG_STID:
3690 return nfs_ok; 4353 status = nfs_ok;
4354 break;
3691 case NFS4_REVOKED_DELEG_STID: 4355 case NFS4_REVOKED_DELEG_STID:
3692 return nfserr_deleg_revoked; 4356 status = nfserr_deleg_revoked;
4357 break;
3693 case NFS4_OPEN_STID: 4358 case NFS4_OPEN_STID:
3694 case NFS4_LOCK_STID: 4359 case NFS4_LOCK_STID:
3695 ols = openlockstateid(s); 4360 ols = openlockstateid(s);
3696 if (ols->st_stateowner->so_is_open_owner 4361 if (ols->st_stateowner->so_is_open_owner
3697 && !(openowner(ols->st_stateowner)->oo_flags 4362 && !(openowner(ols->st_stateowner)->oo_flags
3698 & NFS4_OO_CONFIRMED)) 4363 & NFS4_OO_CONFIRMED))
3699 return nfserr_bad_stateid; 4364 status = nfserr_bad_stateid;
3700 return nfs_ok; 4365 else
4366 status = nfs_ok;
4367 break;
3701 default: 4368 default:
3702 printk("unknown stateid type %x\n", s->sc_type); 4369 printk("unknown stateid type %x\n", s->sc_type);
4370 /* Fallthrough */
3703 case NFS4_CLOSED_STID: 4371 case NFS4_CLOSED_STID:
3704 return nfserr_bad_stateid; 4372 case NFS4_CLOSED_DELEG_STID:
4373 status = nfserr_bad_stateid;
3705 } 4374 }
4375out_unlock:
4376 spin_unlock(&cl->cl_lock);
4377 return status;
3706} 4378}
3707 4379
3708static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, 4380static __be32
3709 struct nfs4_stid **s, bool sessions, 4381nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3710 struct nfsd_net *nn) 4382 stateid_t *stateid, unsigned char typemask,
4383 struct nfs4_stid **s, struct nfsd_net *nn)
3711{ 4384{
3712 struct nfs4_client *cl;
3713 __be32 status; 4385 __be32 status;
3714 4386
3715 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4387 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3716 return nfserr_bad_stateid; 4388 return nfserr_bad_stateid;
3717 status = lookup_clientid(&stateid->si_opaque.so_clid, sessions, 4389 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
3718 nn, &cl);
3719 if (status == nfserr_stale_clientid) { 4390 if (status == nfserr_stale_clientid) {
3720 if (sessions) 4391 if (cstate->session)
3721 return nfserr_bad_stateid; 4392 return nfserr_bad_stateid;
3722 return nfserr_stale_stateid; 4393 return nfserr_stale_stateid;
3723 } 4394 }
3724 if (status) 4395 if (status)
3725 return status; 4396 return status;
3726 *s = find_stateid_by_type(cl, stateid, typemask); 4397 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
3727 if (!*s) 4398 if (!*s)
3728 return nfserr_bad_stateid; 4399 return nfserr_bad_stateid;
3729 return nfs_ok; 4400 return nfs_ok;
@@ -3754,12 +4425,11 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3754 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) 4425 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3755 return check_special_stateids(net, current_fh, stateid, flags); 4426 return check_special_stateids(net, current_fh, stateid, flags);
3756 4427
3757 nfs4_lock_state(); 4428 status = nfsd4_lookup_stateid(cstate, stateid,
3758 4429 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3759 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, 4430 &s, nn);
3760 &s, cstate->minorversion, nn);
3761 if (status) 4431 if (status)
3762 goto out; 4432 return status;
3763 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate)); 4433 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3764 if (status) 4434 if (status)
3765 goto out; 4435 goto out;
@@ -3770,12 +4440,13 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3770 if (status) 4440 if (status)
3771 goto out; 4441 goto out;
3772 if (filpp) { 4442 if (filpp) {
3773 file = dp->dl_file->fi_deleg_file; 4443 file = dp->dl_stid.sc_file->fi_deleg_file;
3774 if (!file) { 4444 if (!file) {
3775 WARN_ON_ONCE(1); 4445 WARN_ON_ONCE(1);
3776 status = nfserr_serverfault; 4446 status = nfserr_serverfault;
3777 goto out; 4447 goto out;
3778 } 4448 }
4449 get_file(file);
3779 } 4450 }
3780 break; 4451 break;
3781 case NFS4_OPEN_STID: 4452 case NFS4_OPEN_STID:
@@ -3791,10 +4462,12 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3791 if (status) 4462 if (status)
3792 goto out; 4463 goto out;
3793 if (filpp) { 4464 if (filpp) {
4465 struct nfs4_file *fp = stp->st_stid.sc_file;
4466
3794 if (flags & RD_STATE) 4467 if (flags & RD_STATE)
3795 file = find_readable_file(stp->st_file); 4468 file = find_readable_file(fp);
3796 else 4469 else
3797 file = find_writeable_file(stp->st_file); 4470 file = find_writeable_file(fp);
3798 } 4471 }
3799 break; 4472 break;
3800 default: 4473 default:
@@ -3803,28 +4476,12 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3803 } 4476 }
3804 status = nfs_ok; 4477 status = nfs_ok;
3805 if (file) 4478 if (file)
3806 *filpp = get_file(file); 4479 *filpp = file;
3807out: 4480out:
3808 nfs4_unlock_state(); 4481 nfs4_put_stid(s);
3809 return status; 4482 return status;
3810} 4483}
3811 4484
3812static __be32
3813nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3814{
3815 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3816
3817 if (check_for_locks(stp->st_file, lo))
3818 return nfserr_locks_held;
3819 /*
3820 * Currently there's a 1-1 lock stateid<->lockowner
3821 * correspondance, and we have to delete the lockowner when we
3822 * delete the lock stateid:
3823 */
3824 release_lockowner(lo);
3825 return nfs_ok;
3826}
3827
3828/* 4485/*
3829 * Test if the stateid is valid 4486 * Test if the stateid is valid
3830 */ 4487 */
@@ -3835,11 +4492,9 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3835 struct nfsd4_test_stateid_id *stateid; 4492 struct nfsd4_test_stateid_id *stateid;
3836 struct nfs4_client *cl = cstate->session->se_client; 4493 struct nfs4_client *cl = cstate->session->se_client;
3837 4494
3838 nfs4_lock_state();
3839 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 4495 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3840 stateid->ts_id_status = 4496 stateid->ts_id_status =
3841 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 4497 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3842 nfs4_unlock_state();
3843 4498
3844 return nfs_ok; 4499 return nfs_ok;
3845} 4500}
@@ -3851,37 +4506,50 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3851 stateid_t *stateid = &free_stateid->fr_stateid; 4506 stateid_t *stateid = &free_stateid->fr_stateid;
3852 struct nfs4_stid *s; 4507 struct nfs4_stid *s;
3853 struct nfs4_delegation *dp; 4508 struct nfs4_delegation *dp;
4509 struct nfs4_ol_stateid *stp;
3854 struct nfs4_client *cl = cstate->session->se_client; 4510 struct nfs4_client *cl = cstate->session->se_client;
3855 __be32 ret = nfserr_bad_stateid; 4511 __be32 ret = nfserr_bad_stateid;
3856 4512
3857 nfs4_lock_state(); 4513 spin_lock(&cl->cl_lock);
3858 s = find_stateid(cl, stateid); 4514 s = find_stateid_locked(cl, stateid);
3859 if (!s) 4515 if (!s)
3860 goto out; 4516 goto out_unlock;
3861 switch (s->sc_type) { 4517 switch (s->sc_type) {
3862 case NFS4_DELEG_STID: 4518 case NFS4_DELEG_STID:
3863 ret = nfserr_locks_held; 4519 ret = nfserr_locks_held;
3864 goto out; 4520 break;
3865 case NFS4_OPEN_STID: 4521 case NFS4_OPEN_STID:
3866 case NFS4_LOCK_STID:
3867 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 4522 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3868 if (ret) 4523 if (ret)
3869 goto out; 4524 break;
3870 if (s->sc_type == NFS4_LOCK_STID) 4525 ret = nfserr_locks_held;
3871 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3872 else
3873 ret = nfserr_locks_held;
3874 break; 4526 break;
4527 case NFS4_LOCK_STID:
4528 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
4529 if (ret)
4530 break;
4531 stp = openlockstateid(s);
4532 ret = nfserr_locks_held;
4533 if (check_for_locks(stp->st_stid.sc_file,
4534 lockowner(stp->st_stateowner)))
4535 break;
4536 unhash_lock_stateid(stp);
4537 spin_unlock(&cl->cl_lock);
4538 nfs4_put_stid(s);
4539 ret = nfs_ok;
4540 goto out;
3875 case NFS4_REVOKED_DELEG_STID: 4541 case NFS4_REVOKED_DELEG_STID:
3876 dp = delegstateid(s); 4542 dp = delegstateid(s);
3877 destroy_revoked_delegation(dp); 4543 list_del_init(&dp->dl_recall_lru);
4544 spin_unlock(&cl->cl_lock);
4545 nfs4_put_stid(s);
3878 ret = nfs_ok; 4546 ret = nfs_ok;
3879 break; 4547 goto out;
3880 default: 4548 /* Default falls through and returns nfserr_bad_stateid */
3881 ret = nfserr_bad_stateid;
3882 } 4549 }
4550out_unlock:
4551 spin_unlock(&cl->cl_lock);
3883out: 4552out:
3884 nfs4_unlock_state();
3885 return ret; 4553 return ret;
3886} 4554}
3887 4555
@@ -3926,20 +4594,24 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3926{ 4594{
3927 __be32 status; 4595 __be32 status;
3928 struct nfs4_stid *s; 4596 struct nfs4_stid *s;
4597 struct nfs4_ol_stateid *stp = NULL;
3929 4598
3930 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__, 4599 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3931 seqid, STATEID_VAL(stateid)); 4600 seqid, STATEID_VAL(stateid));
3932 4601
3933 *stpp = NULL; 4602 *stpp = NULL;
3934 status = nfsd4_lookup_stateid(stateid, typemask, &s, 4603 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
3935 cstate->minorversion, nn);
3936 if (status) 4604 if (status)
3937 return status; 4605 return status;
3938 *stpp = openlockstateid(s); 4606 stp = openlockstateid(s);
3939 if (!nfsd4_has_session(cstate)) 4607 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
3940 cstate->replay_owner = (*stpp)->st_stateowner;
3941 4608
3942 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); 4609 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4610 if (!status)
4611 *stpp = stp;
4612 else
4613 nfs4_put_stid(&stp->st_stid);
4614 return status;
3943} 4615}
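
nfs4_preprocess_seqid_op() now returns the stateid with its reference count already raised, dropping that reference itself when the seqid checks fail and leaving the put to the caller on success, which is why the callers below grow put_stateid labels. The following single-threaded sketch (invented names, plain integers instead of atomics) illustrates that get-on-lookup / put-on-error contract.

/*
 * Sketch of the refcount contract: a lookup returns an object already
 * referenced, error paths drop that reference, the caller puts it on success.
 */
#include <stdio.h>
#include <stdlib.h>

struct stid {
    int refcount;
    int generation;
};

static void stid_put(struct stid *s)
{
    if (--s->refcount == 0) {
        printf("freeing stid\n");
        free(s);
    }
}

static struct stid *stid_get(struct stid *s)
{
    s->refcount++;
    return s;
}

/* Return a referenced stid on success; drop the reference on any failure. */
static struct stid *lookup_checked(struct stid *table, int want_generation)
{
    struct stid *s = stid_get(table);

    if (s->generation != want_generation) {
        stid_put(s);                /* the error path gives it back */
        return NULL;
    }
    return s;                       /* caller must stid_put() later */
}

int main(void)
{
    struct stid *table = calloc(1, sizeof(*table));
    struct stid *s;

    if (!table)
        return 1;
    table->refcount = 1;
    table->generation = 2;
    s = lookup_checked(table, 2);
    if (s)
        stid_put(s);                /* put owed by the "operation" */
    stid_put(table);                /* put the original reference */
    return 0;
}
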
3944 4616
3945static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 4617static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
@@ -3947,14 +4619,18 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
3947{ 4619{
3948 __be32 status; 4620 __be32 status;
3949 struct nfs4_openowner *oo; 4621 struct nfs4_openowner *oo;
4622 struct nfs4_ol_stateid *stp;
3950 4623
3951 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 4624 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3952 NFS4_OPEN_STID, stpp, nn); 4625 NFS4_OPEN_STID, &stp, nn);
3953 if (status) 4626 if (status)
3954 return status; 4627 return status;
3955 oo = openowner((*stpp)->st_stateowner); 4628 oo = openowner(stp->st_stateowner);
3956 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) 4629 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4630 nfs4_put_stid(&stp->st_stid);
3957 return nfserr_bad_stateid; 4631 return nfserr_bad_stateid;
4632 }
4633 *stpp = stp;
3958 return nfs_ok; 4634 return nfs_ok;
3959} 4635}
3960 4636
@@ -3974,8 +4650,6 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3974 if (status) 4650 if (status)
3975 return status; 4651 return status;
3976 4652
3977 nfs4_lock_state();
3978
3979 status = nfs4_preprocess_seqid_op(cstate, 4653 status = nfs4_preprocess_seqid_op(cstate,
3980 oc->oc_seqid, &oc->oc_req_stateid, 4654 oc->oc_seqid, &oc->oc_req_stateid,
3981 NFS4_OPEN_STID, &stp, nn); 4655 NFS4_OPEN_STID, &stp, nn);
@@ -3984,7 +4658,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3984 oo = openowner(stp->st_stateowner); 4658 oo = openowner(stp->st_stateowner);
3985 status = nfserr_bad_stateid; 4659 status = nfserr_bad_stateid;
3986 if (oo->oo_flags & NFS4_OO_CONFIRMED) 4660 if (oo->oo_flags & NFS4_OO_CONFIRMED)
3987 goto out; 4661 goto put_stateid;
3988 oo->oo_flags |= NFS4_OO_CONFIRMED; 4662 oo->oo_flags |= NFS4_OO_CONFIRMED;
3989 update_stateid(&stp->st_stid.sc_stateid); 4663 update_stateid(&stp->st_stid.sc_stateid);
3990 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4664 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -3993,10 +4667,10 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3993 4667
3994 nfsd4_client_record_create(oo->oo_owner.so_client); 4668 nfsd4_client_record_create(oo->oo_owner.so_client);
3995 status = nfs_ok; 4669 status = nfs_ok;
4670put_stateid:
4671 nfs4_put_stid(&stp->st_stid);
3996out: 4672out:
3997 nfsd4_bump_seqid(cstate, status); 4673 nfsd4_bump_seqid(cstate, status);
3998 if (!cstate->replay_owner)
3999 nfs4_unlock_state();
4000 return status; 4674 return status;
4001} 4675}
4002 4676
@@ -4004,7 +4678,7 @@ static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 a
4004{ 4678{
4005 if (!test_access(access, stp)) 4679 if (!test_access(access, stp))
4006 return; 4680 return;
4007 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); 4681 nfs4_file_put_access(stp->st_stid.sc_file, access);
4008 clear_access(access, stp); 4682 clear_access(access, stp);
4009} 4683}
4010 4684
@@ -4026,16 +4700,6 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
4026 } 4700 }
4027} 4701}
4028 4702
4029static void
4030reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
4031{
4032 int i;
4033 for (i = 0; i < 4; i++) {
4034 if ((i & deny) != i)
4035 clear_deny(i, stp);
4036 }
4037}
4038
4039__be32 4703__be32
4040nfsd4_open_downgrade(struct svc_rqst *rqstp, 4704nfsd4_open_downgrade(struct svc_rqst *rqstp,
4041 struct nfsd4_compound_state *cstate, 4705 struct nfsd4_compound_state *cstate,
@@ -4053,21 +4717,20 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
4053 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 4717 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
4054 od->od_deleg_want); 4718 od->od_deleg_want);
4055 4719
4056 nfs4_lock_state();
4057 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 4720 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4058 &od->od_stateid, &stp, nn); 4721 &od->od_stateid, &stp, nn);
4059 if (status) 4722 if (status)
4060 goto out; 4723 goto out;
4061 status = nfserr_inval; 4724 status = nfserr_inval;
4062 if (!test_access(od->od_share_access, stp)) { 4725 if (!test_access(od->od_share_access, stp)) {
4063 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n", 4726 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
4064 stp->st_access_bmap, od->od_share_access); 4727 stp->st_access_bmap, od->od_share_access);
4065 goto out; 4728 goto put_stateid;
4066 } 4729 }
4067 if (!test_deny(od->od_share_deny, stp)) { 4730 if (!test_deny(od->od_share_deny, stp)) {
4068 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 4731 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
4069 stp->st_deny_bmap, od->od_share_deny); 4732 stp->st_deny_bmap, od->od_share_deny);
4070 goto out; 4733 goto put_stateid;
4071 } 4734 }
4072 nfs4_stateid_downgrade(stp, od->od_share_access); 4735 nfs4_stateid_downgrade(stp, od->od_share_access);
4073 4736
@@ -4076,17 +4739,31 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
4076 update_stateid(&stp->st_stid.sc_stateid); 4739 update_stateid(&stp->st_stid.sc_stateid);
4077 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4740 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4078 status = nfs_ok; 4741 status = nfs_ok;
4742put_stateid:
4743 nfs4_put_stid(&stp->st_stid);
4079out: 4744out:
4080 nfsd4_bump_seqid(cstate, status); 4745 nfsd4_bump_seqid(cstate, status);
4081 if (!cstate->replay_owner)
4082 nfs4_unlock_state();
4083 return status; 4746 return status;
4084} 4747}
4085 4748
4086static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 4749static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
4087{ 4750{
4088 unhash_open_stateid(s); 4751 struct nfs4_client *clp = s->st_stid.sc_client;
4752 LIST_HEAD(reaplist);
4753
4089 s->st_stid.sc_type = NFS4_CLOSED_STID; 4754 s->st_stid.sc_type = NFS4_CLOSED_STID;
4755 spin_lock(&clp->cl_lock);
4756 unhash_open_stateid(s, &reaplist);
4757
4758 if (clp->cl_minorversion) {
4759 put_ol_stateid_locked(s, &reaplist);
4760 spin_unlock(&clp->cl_lock);
4761 free_ol_stateid_reaplist(&reaplist);
4762 } else {
4763 spin_unlock(&clp->cl_lock);
4764 free_ol_stateid_reaplist(&reaplist);
4765 move_to_close_lru(s, clp->net);
4766 }
4090} 4767}
4091 4768
4092/* 4769/*
@@ -4097,7 +4774,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4097 struct nfsd4_close *close) 4774 struct nfsd4_close *close)
4098{ 4775{
4099 __be32 status; 4776 __be32 status;
4100 struct nfs4_openowner *oo;
4101 struct nfs4_ol_stateid *stp; 4777 struct nfs4_ol_stateid *stp;
4102 struct net *net = SVC_NET(rqstp); 4778 struct net *net = SVC_NET(rqstp);
4103 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4779 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -4105,7 +4781,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4105 dprintk("NFSD: nfsd4_close on file %pd\n", 4781 dprintk("NFSD: nfsd4_close on file %pd\n",
4106 cstate->current_fh.fh_dentry); 4782 cstate->current_fh.fh_dentry);
4107 4783
4108 nfs4_lock_state();
4109 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 4784 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4110 &close->cl_stateid, 4785 &close->cl_stateid,
4111 NFS4_OPEN_STID|NFS4_CLOSED_STID, 4786 NFS4_OPEN_STID|NFS4_CLOSED_STID,
@@ -4113,31 +4788,14 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4113 nfsd4_bump_seqid(cstate, status); 4788 nfsd4_bump_seqid(cstate, status);
4114 if (status) 4789 if (status)
4115 goto out; 4790 goto out;
4116 oo = openowner(stp->st_stateowner);
4117 update_stateid(&stp->st_stid.sc_stateid); 4791 update_stateid(&stp->st_stid.sc_stateid);
4118 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 4792 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4119 4793
4120 nfsd4_close_open_stateid(stp); 4794 nfsd4_close_open_stateid(stp);
4121 4795
4122 if (cstate->minorversion) 4796 /* put reference from nfs4_preprocess_seqid_op */
4123 free_generic_stateid(stp); 4797 nfs4_put_stid(&stp->st_stid);
4124 else
4125 oo->oo_last_closed_stid = stp;
4126
4127 if (list_empty(&oo->oo_owner.so_stateids)) {
4128 if (cstate->minorversion)
4129 release_openowner(oo);
4130 else {
4131 /*
4132 * In the 4.0 case we need to keep the owners around a
4133 * little while to handle CLOSE replay.
4134 */
4135 move_to_close_lru(oo, SVC_NET(rqstp));
4136 }
4137 }
4138out: 4798out:
4139 if (!cstate->replay_owner)
4140 nfs4_unlock_state();
4141 return status; 4799 return status;
4142} 4800}
4143 4801
@@ -4154,28 +4812,24 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4154 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 4812 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4155 return status; 4813 return status;
4156 4814
4157 nfs4_lock_state(); 4815 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4158 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s,
4159 cstate->minorversion, nn);
4160 if (status) 4816 if (status)
4161 goto out; 4817 goto out;
4162 dp = delegstateid(s); 4818 dp = delegstateid(s);
4163 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate)); 4819 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4164 if (status) 4820 if (status)
4165 goto out; 4821 goto put_stateid;
4166 4822
4167 destroy_delegation(dp); 4823 destroy_delegation(dp);
4824put_stateid:
4825 nfs4_put_stid(&dp->dl_stid);
4168out: 4826out:
4169 nfs4_unlock_state();
4170
4171 return status; 4827 return status;
4172} 4828}
4173 4829
4174 4830
4175#define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start)) 4831#define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
4176 4832
4177#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
4178
4179static inline u64 4833static inline u64
4180end_offset(u64 start, u64 len) 4834end_offset(u64 start, u64 len)
4181{ 4835{
@@ -4196,13 +4850,6 @@ last_byte_offset(u64 start, u64 len)
4196 return end > start ? end - 1: NFS4_MAX_UINT64; 4850 return end > start ? end - 1: NFS4_MAX_UINT64;
4197} 4851}
4198 4852
4199static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
4200{
4201 return (file_hashval(inode) + cl_id
4202 + opaque_hashval(ownername->data, ownername->len))
4203 & LOCKOWNER_INO_HASH_MASK;
4204}
4205
4206/* 4853/*
4207 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 4854 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4208 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 4855 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
@@ -4255,47 +4902,56 @@ nevermind:
4255 deny->ld_type = NFS4_WRITE_LT; 4902 deny->ld_type = NFS4_WRITE_LT;
4256} 4903}
4257 4904
4258static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner) 4905static struct nfs4_lockowner *
4906find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
4907 struct nfs4_client *clp)
4259{ 4908{
4260 struct nfs4_ol_stateid *lst; 4909 unsigned int strhashval = ownerstr_hashval(owner);
4910 struct nfs4_stateowner *so;
4261 4911
4262 if (!same_owner_str(&lo->lo_owner, owner, clid)) 4912 lockdep_assert_held(&clp->cl_lock);
4263 return false; 4913
4264 if (list_empty(&lo->lo_owner.so_stateids)) { 4914 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
4265 WARN_ON_ONCE(1); 4915 so_strhash) {
4266 return false; 4916 if (so->so_is_open_owner)
4917 continue;
4918 if (!same_owner_str(so, owner))
4919 continue;
4920 atomic_inc(&so->so_count);
4921 return lockowner(so);
4267 } 4922 }
4268 lst = list_first_entry(&lo->lo_owner.so_stateids, 4923 return NULL;
4269 struct nfs4_ol_stateid, st_perstateowner);
4270 return lst->st_file->fi_inode == inode;
4271} 4924}
4272 4925
4273static struct nfs4_lockowner * 4926static struct nfs4_lockowner *
4274find_lockowner_str(struct inode *inode, clientid_t *clid, 4927find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
4275 struct xdr_netobj *owner, struct nfsd_net *nn) 4928 struct nfs4_client *clp)
4276{ 4929{
4277 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
4278 struct nfs4_lockowner *lo; 4930 struct nfs4_lockowner *lo;
4279 4931
4280 list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) { 4932 spin_lock(&clp->cl_lock);
4281 if (same_lockowner_ino(lo, inode, clid, owner)) 4933 lo = find_lockowner_str_locked(clid, owner, clp);
4282 return lo; 4934 spin_unlock(&clp->cl_lock);
4283 } 4935 return lo;
4284 return NULL;
4285} 4936}
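
find_lockowner_str_locked() hashes the owner string to a bucket in the per-client table, walks the chain under cl_lock, and takes a reference on a match before returning it. The fragment below sketches that bucket-walk-and-get pattern in standalone C, with a pthread mutex standing in for cl_lock; the structures (struct client, struct owner) and the hash function are invented for the example.

/*
 * Hashed owner lookup sketch: hash the name to a bucket, walk the chain with
 * the client lock held, and bump the refcount before returning a match.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

struct owner {
    struct owner *next;
    const char *name;
    int refcount;
};

struct client {
    pthread_mutex_t lock;
    struct owner *hash[NBUCKETS];
};

static unsigned int hash_name(const char *name)
{
    unsigned int h = 0;

    while (*name)
        h = h * 31 + (unsigned char)*name++;
    return h % NBUCKETS;
}

/* Caller must hold clp->lock; returns a referenced owner or NULL. */
static struct owner *find_owner_locked(struct client *clp, const char *name)
{
    struct owner *o;

    for (o = clp->hash[hash_name(name)]; o; o = o->next) {
        if (strcmp(o->name, name))
            continue;
        o->refcount++;              /* reference taken under the lock */
        return o;
    }
    return NULL;
}

int main(void)
{
    struct client clp = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct owner lo = { .name = "lockowner-1", .refcount = 1 };
    struct owner *found;

    clp.hash[hash_name(lo.name)] = &lo;
    pthread_mutex_lock(&clp.lock);
    found = find_owner_locked(&clp, "lockowner-1");
    pthread_mutex_unlock(&clp.lock);
    printf("found=%s refcount=%d\n", found ? found->name : "none",
           lo.refcount);
    return 0;
}
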
4286 4937
4287static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp) 4938static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
4288{ 4939{
4289 struct inode *inode = open_stp->st_file->fi_inode; 4940 unhash_lockowner_locked(lockowner(sop));
4290 unsigned int inohash = lockowner_ino_hashval(inode, 4941}
4291 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner); 4942
4292 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 4943static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
4944{
4945 struct nfs4_lockowner *lo = lockowner(sop);
4293 4946
4294 list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]); 4947 kmem_cache_free(lockowner_slab, lo);
4295 list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]);
4296 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
4297} 4948}
4298 4949
4950static const struct nfs4_stateowner_operations lockowner_ops = {
4951 .so_unhash = nfs4_unhash_lockowner,
4952 .so_free = nfs4_free_lockowner,
4953};
4954
4299/* 4955/*
4300 * Alloc a lock owner structure. 4956 * Alloc a lock owner structure.
4301 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 4957 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
@@ -4303,42 +4959,107 @@ static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, s
4303 * 4959 *
4304 * strhashval = ownerstr_hashval 4960 * strhashval = ownerstr_hashval
4305 */ 4961 */
4306
4307static struct nfs4_lockowner * 4962static struct nfs4_lockowner *
4308alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { 4963alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
4309 struct nfs4_lockowner *lo; 4964 struct nfs4_ol_stateid *open_stp,
4965 struct nfsd4_lock *lock)
4966{
4967 struct nfs4_lockowner *lo, *ret;
4310 4968
4311 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 4969 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4312 if (!lo) 4970 if (!lo)
4313 return NULL; 4971 return NULL;
4314 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 4972 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4315 lo->lo_owner.so_is_open_owner = 0; 4973 lo->lo_owner.so_is_open_owner = 0;
4316 /* It is the openowner seqid that will be incremented in encode in the 4974 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
4317 * case of new lockowners; so increment the lock seqid manually: */ 4975 lo->lo_owner.so_ops = &lockowner_ops;
4318 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1; 4976 spin_lock(&clp->cl_lock);
4319 hash_lockowner(lo, strhashval, clp, open_stp); 4977 ret = find_lockowner_str_locked(&clp->cl_clientid,
4978 &lock->lk_new_owner, clp);
4979 if (ret == NULL) {
4980 list_add(&lo->lo_owner.so_strhash,
4981 &clp->cl_ownerstr_hashtbl[strhashval]);
4982 ret = lo;
4983 } else
4984 nfs4_free_lockowner(&lo->lo_owner);
4985 spin_unlock(&clp->cl_lock);
4320 return lo; 4986 return lo;
4321} 4987}
4322 4988
4323static struct nfs4_ol_stateid * 4989static void
4324alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp) 4990init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
4991 struct nfs4_file *fp, struct inode *inode,
4992 struct nfs4_ol_stateid *open_stp)
4325{ 4993{
4326 struct nfs4_ol_stateid *stp;
4327 struct nfs4_client *clp = lo->lo_owner.so_client; 4994 struct nfs4_client *clp = lo->lo_owner.so_client;
4328 4995
4329 stp = nfs4_alloc_stateid(clp); 4996 lockdep_assert_held(&clp->cl_lock);
4330 if (stp == NULL) 4997
4331 return NULL; 4998 atomic_inc(&stp->st_stid.sc_count);
4332 stp->st_stid.sc_type = NFS4_LOCK_STID; 4999 stp->st_stid.sc_type = NFS4_LOCK_STID;
4333 list_add(&stp->st_perfile, &fp->fi_stateids);
4334 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4335 stp->st_stateowner = &lo->lo_owner; 5000 stp->st_stateowner = &lo->lo_owner;
5001 atomic_inc(&lo->lo_owner.so_count);
4336 get_nfs4_file(fp); 5002 get_nfs4_file(fp);
4337 stp->st_file = fp; 5003 stp->st_stid.sc_file = fp;
5004 stp->st_stid.sc_free = nfs4_free_lock_stateid;
4338 stp->st_access_bmap = 0; 5005 stp->st_access_bmap = 0;
4339 stp->st_deny_bmap = open_stp->st_deny_bmap; 5006 stp->st_deny_bmap = open_stp->st_deny_bmap;
4340 stp->st_openstp = open_stp; 5007 stp->st_openstp = open_stp;
4341 return stp; 5008 list_add(&stp->st_locks, &open_stp->st_locks);
5009 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5010 spin_lock(&fp->fi_lock);
5011 list_add(&stp->st_perfile, &fp->fi_stateids);
5012 spin_unlock(&fp->fi_lock);
5013}
5014
5015static struct nfs4_ol_stateid *
5016find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5017{
5018 struct nfs4_ol_stateid *lst;
5019 struct nfs4_client *clp = lo->lo_owner.so_client;
5020
5021 lockdep_assert_held(&clp->cl_lock);
5022
5023 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5024 if (lst->st_stid.sc_file == fp) {
5025 atomic_inc(&lst->st_stid.sc_count);
5026 return lst;
5027 }
5028 }
5029 return NULL;
5030}
5031
5032static struct nfs4_ol_stateid *
5033find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5034 struct inode *inode, struct nfs4_ol_stateid *ost,
5035 bool *new)
5036{
5037 struct nfs4_stid *ns = NULL;
5038 struct nfs4_ol_stateid *lst;
5039 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5040 struct nfs4_client *clp = oo->oo_owner.so_client;
5041
5042 spin_lock(&clp->cl_lock);
5043 lst = find_lock_stateid(lo, fi);
5044 if (lst == NULL) {
5045 spin_unlock(&clp->cl_lock);
5046 ns = nfs4_alloc_stid(clp, stateid_slab);
5047 if (ns == NULL)
5048 return NULL;
5049
5050 spin_lock(&clp->cl_lock);
5051 lst = find_lock_stateid(lo, fi);
5052 if (likely(!lst)) {
5053 lst = openlockstateid(ns);
5054 init_lock_stateid(lst, lo, fi, inode, ost);
5055 ns = NULL;
5056 *new = true;
5057 }
5058 }
5059 spin_unlock(&clp->cl_lock);
5060 if (ns)
5061 nfs4_put_stid(ns);
5062 return lst;
4342} 5063}
4343 5064
4344static int 5065static int
@@ -4350,46 +5071,53 @@ check_lock_length(u64 offset, u64 length)
4350 5071
4351static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 5072static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4352{ 5073{
4353 struct nfs4_file *fp = lock_stp->st_file; 5074 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
4354 int oflag = nfs4_access_to_omode(access); 5075
5076 lockdep_assert_held(&fp->fi_lock);
4355 5077
4356 if (test_access(access, lock_stp)) 5078 if (test_access(access, lock_stp))
4357 return; 5079 return;
4358 nfs4_file_get_access(fp, oflag); 5080 __nfs4_file_get_access(fp, access);
4359 set_access(access, lock_stp); 5081 set_access(access, lock_stp);
4360} 5082}
4361 5083
4362static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) 5084static __be32
5085lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5086 struct nfs4_ol_stateid *ost,
5087 struct nfsd4_lock *lock,
5088 struct nfs4_ol_stateid **lst, bool *new)
4363{ 5089{
4364 struct nfs4_file *fi = ost->st_file; 5090 __be32 status;
5091 struct nfs4_file *fi = ost->st_stid.sc_file;
4365 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 5092 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
4366 struct nfs4_client *cl = oo->oo_owner.so_client; 5093 struct nfs4_client *cl = oo->oo_owner.so_client;
5094 struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
4367 struct nfs4_lockowner *lo; 5095 struct nfs4_lockowner *lo;
4368 unsigned int strhashval; 5096 unsigned int strhashval;
4369 struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id); 5097
4370 5098 lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
4371 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, 5099 if (!lo) {
4372 &lock->v.new.owner, nn); 5100 strhashval = ownerstr_hashval(&lock->v.new.owner);
4373 if (lo) { 5101 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4374 if (!cstate->minorversion) 5102 if (lo == NULL)
4375 return nfserr_bad_seqid; 5103 return nfserr_jukebox;
4376 /* XXX: a lockowner always has exactly one stateid: */ 5104 } else {
4377 *lst = list_first_entry(&lo->lo_owner.so_stateids, 5105 /* with an existing lockowner, seqids must be the same */
4378 struct nfs4_ol_stateid, st_perstateowner); 5106 status = nfserr_bad_seqid;
4379 return nfs_ok; 5107 if (!cstate->minorversion &&
5108 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5109 goto out;
4380 } 5110 }
4381 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id, 5111
4382 &lock->v.new.owner); 5112 *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
4383 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4384 if (lo == NULL)
4385 return nfserr_jukebox;
4386 *lst = alloc_init_lock_stateid(lo, fi, ost);
4387 if (*lst == NULL) { 5113 if (*lst == NULL) {
4388 release_lockowner(lo); 5114 status = nfserr_jukebox;
4389 return nfserr_jukebox; 5115 goto out;
4390 } 5116 }
4391 *new = true; 5117 status = nfs_ok;
4392 return nfs_ok; 5118out:
5119 nfs4_put_stateowner(&lo->lo_owner);
5120 return status;
4393} 5121}
4394 5122
4395/* 5123/*
@@ -4401,14 +5129,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4401{ 5129{
4402 struct nfs4_openowner *open_sop = NULL; 5130 struct nfs4_openowner *open_sop = NULL;
4403 struct nfs4_lockowner *lock_sop = NULL; 5131 struct nfs4_lockowner *lock_sop = NULL;
4404 struct nfs4_ol_stateid *lock_stp; 5132 struct nfs4_ol_stateid *lock_stp = NULL;
5133 struct nfs4_ol_stateid *open_stp = NULL;
5134 struct nfs4_file *fp;
4405 struct file *filp = NULL; 5135 struct file *filp = NULL;
4406 struct file_lock *file_lock = NULL; 5136 struct file_lock *file_lock = NULL;
4407 struct file_lock *conflock = NULL; 5137 struct file_lock *conflock = NULL;
4408 __be32 status = 0; 5138 __be32 status = 0;
4409 bool new_state = false;
4410 int lkflg; 5139 int lkflg;
4411 int err; 5140 int err;
5141 bool new = false;
4412 struct net *net = SVC_NET(rqstp); 5142 struct net *net = SVC_NET(rqstp);
4413 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 5143 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4414 5144
@@ -4425,11 +5155,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4425 return status; 5155 return status;
4426 } 5156 }
4427 5157
4428 nfs4_lock_state();
4429
4430 if (lock->lk_is_new) { 5158 if (lock->lk_is_new) {
4431 struct nfs4_ol_stateid *open_stp = NULL;
4432
4433 if (nfsd4_has_session(cstate)) 5159 if (nfsd4_has_session(cstate))
4434 /* See rfc 5661 18.10.3: given clientid is ignored: */ 5160 /* See rfc 5661 18.10.3: given clientid is ignored: */
4435 memcpy(&lock->v.new.clientid, 5161 memcpy(&lock->v.new.clientid,
@@ -4453,12 +5179,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4453 &lock->v.new.clientid)) 5179 &lock->v.new.clientid))
4454 goto out; 5180 goto out;
4455 status = lookup_or_create_lock_state(cstate, open_stp, lock, 5181 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4456 &lock_stp, &new_state); 5182 &lock_stp, &new);
4457 } else 5183 } else {
4458 status = nfs4_preprocess_seqid_op(cstate, 5184 status = nfs4_preprocess_seqid_op(cstate,
4459 lock->lk_old_lock_seqid, 5185 lock->lk_old_lock_seqid,
4460 &lock->lk_old_lock_stateid, 5186 &lock->lk_old_lock_stateid,
4461 NFS4_LOCK_STID, &lock_stp, nn); 5187 NFS4_LOCK_STID, &lock_stp, nn);
5188 }
4462 if (status) 5189 if (status)
4463 goto out; 5190 goto out;
4464 lock_sop = lockowner(lock_stp->st_stateowner); 5191 lock_sop = lockowner(lock_stp->st_stateowner);
@@ -4482,20 +5209,25 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4482 goto out; 5209 goto out;
4483 } 5210 }
4484 5211
5212 fp = lock_stp->st_stid.sc_file;
4485 locks_init_lock(file_lock); 5213 locks_init_lock(file_lock);
4486 switch (lock->lk_type) { 5214 switch (lock->lk_type) {
4487 case NFS4_READ_LT: 5215 case NFS4_READ_LT:
4488 case NFS4_READW_LT: 5216 case NFS4_READW_LT:
4489 filp = find_readable_file(lock_stp->st_file); 5217 spin_lock(&fp->fi_lock);
5218 filp = find_readable_file_locked(fp);
4490 if (filp) 5219 if (filp)
4491 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 5220 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5221 spin_unlock(&fp->fi_lock);
4492 file_lock->fl_type = F_RDLCK; 5222 file_lock->fl_type = F_RDLCK;
4493 break; 5223 break;
4494 case NFS4_WRITE_LT: 5224 case NFS4_WRITE_LT:
4495 case NFS4_WRITEW_LT: 5225 case NFS4_WRITEW_LT:
4496 filp = find_writeable_file(lock_stp->st_file); 5226 spin_lock(&fp->fi_lock);
5227 filp = find_writeable_file_locked(fp);
4497 if (filp) 5228 if (filp)
4498 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 5229 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5230 spin_unlock(&fp->fi_lock);
4499 file_lock->fl_type = F_WRLCK; 5231 file_lock->fl_type = F_WRLCK;
4500 break; 5232 break;
4501 default: 5233 default:
@@ -4544,11 +5276,27 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4544 break; 5276 break;
4545 } 5277 }
4546out: 5278out:
4547 if (status && new_state) 5279 if (filp)
4548 release_lockowner(lock_sop); 5280 fput(filp);
5281 if (lock_stp) {
5282 /* Bump seqid manually if the 4.0 replay owner is openowner */
5283 if (cstate->replay_owner &&
5284 cstate->replay_owner != &lock_sop->lo_owner &&
5285 seqid_mutating_err(ntohl(status)))
5286 lock_sop->lo_owner.so_seqid++;
5287
5288 /*
5289 * If this is a new, never-before-used stateid, and we are
5290 * returning an error, then just go ahead and release it.
5291 */
5292 if (status && new)
5293 release_lock_stateid(lock_stp);
5294
5295 nfs4_put_stid(&lock_stp->st_stid);
5296 }
5297 if (open_stp)
5298 nfs4_put_stid(&open_stp->st_stid);
4549 nfsd4_bump_seqid(cstate, status); 5299 nfsd4_bump_seqid(cstate, status);
4550 if (!cstate->replay_owner)
4551 nfs4_unlock_state();
4552 if (file_lock) 5300 if (file_lock)
4553 locks_free_lock(file_lock); 5301 locks_free_lock(file_lock);
4554 if (conflock) 5302 if (conflock)
@@ -4580,9 +5328,8 @@ __be32
4580nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 5328nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4581 struct nfsd4_lockt *lockt) 5329 struct nfsd4_lockt *lockt)
4582{ 5330{
4583 struct inode *inode;
4584 struct file_lock *file_lock = NULL; 5331 struct file_lock *file_lock = NULL;
4585 struct nfs4_lockowner *lo; 5332 struct nfs4_lockowner *lo = NULL;
4586 __be32 status; 5333 __be32 status;
4587 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5334 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4588 5335
@@ -4592,10 +5339,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4592 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 5339 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4593 return nfserr_inval; 5340 return nfserr_inval;
4594 5341
4595 nfs4_lock_state();
4596
4597 if (!nfsd4_has_session(cstate)) { 5342 if (!nfsd4_has_session(cstate)) {
4598 status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL); 5343 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
4599 if (status) 5344 if (status)
4600 goto out; 5345 goto out;
4601 } 5346 }
@@ -4603,7 +5348,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4603 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 5348 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4604 goto out; 5349 goto out;
4605 5350
4606 inode = cstate->current_fh.fh_dentry->d_inode;
4607 file_lock = locks_alloc_lock(); 5351 file_lock = locks_alloc_lock();
4608 if (!file_lock) { 5352 if (!file_lock) {
4609 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5353 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
@@ -4626,7 +5370,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4626 goto out; 5370 goto out;
4627 } 5371 }
4628 5372
4629 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn); 5373 lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
5374 cstate->clp);
4630 if (lo) 5375 if (lo)
4631 file_lock->fl_owner = (fl_owner_t)lo; 5376 file_lock->fl_owner = (fl_owner_t)lo;
4632 file_lock->fl_pid = current->tgid; 5377 file_lock->fl_pid = current->tgid;
@@ -4646,7 +5391,8 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4646 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 5391 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
4647 } 5392 }
4648out: 5393out:
4649 nfs4_unlock_state(); 5394 if (lo)
5395 nfs4_put_stateowner(&lo->lo_owner);
4650 if (file_lock) 5396 if (file_lock)
4651 locks_free_lock(file_lock); 5397 locks_free_lock(file_lock);
4652 return status; 5398 return status;
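LOCKT only tests whether a conflicting lock exists; nothing is installed. Outside the kernel the equivalent probe is fcntl(F_GETLK), which either sets l_type to F_UNLCK or fills in the conflicting lock's details. A small self-contained example (the path is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct flock fl = {
                .l_type   = F_WRLCK,    /* would a write lock conflict? */
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 means "to end of file" */
        };
        int fd = open("/tmp/lockt-demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (fcntl(fd, F_GETLK, &fl) < 0) {
                perror("fcntl(F_GETLK)");
                return 1;
        }
        if (fl.l_type == F_UNLCK)
                printf("no conflicting lock\n");
        else
                printf("conflict held by pid %ld\n", (long)fl.l_pid);
        close(fd);
        return 0;
}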
@@ -4670,23 +5416,21 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4670 if (check_lock_length(locku->lu_offset, locku->lu_length)) 5416 if (check_lock_length(locku->lu_offset, locku->lu_length))
4671 return nfserr_inval; 5417 return nfserr_inval;
4672 5418
4673 nfs4_lock_state();
4674
4675 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 5419 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4676 &locku->lu_stateid, NFS4_LOCK_STID, 5420 &locku->lu_stateid, NFS4_LOCK_STID,
4677 &stp, nn); 5421 &stp, nn);
4678 if (status) 5422 if (status)
4679 goto out; 5423 goto out;
4680 filp = find_any_file(stp->st_file); 5424 filp = find_any_file(stp->st_stid.sc_file);
4681 if (!filp) { 5425 if (!filp) {
4682 status = nfserr_lock_range; 5426 status = nfserr_lock_range;
4683 goto out; 5427 goto put_stateid;
4684 } 5428 }
4685 file_lock = locks_alloc_lock(); 5429 file_lock = locks_alloc_lock();
4686 if (!file_lock) { 5430 if (!file_lock) {
4687 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 5431 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4688 status = nfserr_jukebox; 5432 status = nfserr_jukebox;
4689 goto out; 5433 goto fput;
4690 } 5434 }
4691 locks_init_lock(file_lock); 5435 locks_init_lock(file_lock);
4692 file_lock->fl_type = F_UNLCK; 5436 file_lock->fl_type = F_UNLCK;
@@ -4708,41 +5452,51 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4708 } 5452 }
4709 update_stateid(&stp->st_stid.sc_stateid); 5453 update_stateid(&stp->st_stid.sc_stateid);
4710 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 5454 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4711 5455fput:
5456 fput(filp);
5457put_stateid:
5458 nfs4_put_stid(&stp->st_stid);
4712out: 5459out:
4713 nfsd4_bump_seqid(cstate, status); 5460 nfsd4_bump_seqid(cstate, status);
4714 if (!cstate->replay_owner)
4715 nfs4_unlock_state();
4716 if (file_lock) 5461 if (file_lock)
4717 locks_free_lock(file_lock); 5462 locks_free_lock(file_lock);
4718 return status; 5463 return status;
4719 5464
4720out_nfserr: 5465out_nfserr:
4721 status = nfserrno(err); 5466 status = nfserrno(err);
4722 goto out; 5467 goto fput;
4723} 5468}
4724 5469
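The rewritten LOCKU error handling above unwinds with dedicated labels (fput, put_stateid) so each path releases exactly what it acquired. A compact userspace sketch of that goto-ladder style, with purely illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct resource {
        const char *name;
};

static struct resource *acquire(const char *name)
{
        struct resource *r = malloc(sizeof(*r));

        if (r)
                r->name = name;
        return r;
}

static void release(struct resource *r)
{
        printf("releasing %s\n", r->name);
        free(r);
}

/* Each acquisition gets a label; error paths jump to the deepest safe one. */
static int do_unlock(int fail_late)
{
        struct resource *stateid, *file;
        int status = 0;

        stateid = acquire("stateid");
        if (!stateid)
                return -1;
        file = acquire("file");
        if (!file) {
                status = -1;
                goto put_stateid;
        }
        if (fail_late) {                /* e.g. lock allocation failed */
                status = -1;
                goto put_file;
        }
        printf("unlock succeeded\n");
put_file:
        release(file);
put_stateid:
        release(stateid);
        return status;
}

int main(void)
{
        do_unlock(0);
        do_unlock(1);
        return 0;
}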
4725/* 5470/*
4726 * returns 5471 * returns
4727 * 1: locks held by lockowner 5472 * true: locks held by lockowner
4728 * 0: no locks held by lockowner 5473 * false: no locks held by lockowner
4729 */ 5474 */
4730static int 5475static bool
4731check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner) 5476check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
4732{ 5477{
4733 struct file_lock **flpp; 5478 struct file_lock **flpp;
4734 struct inode *inode = filp->fi_inode; 5479 int status = false;
4735 int status = 0; 5480 struct file *filp = find_any_file(fp);
5481 struct inode *inode;
5482
5483 if (!filp) {
5484 /* Any valid lock stateid should have some sort of access */
5485 WARN_ON_ONCE(1);
5486 return status;
5487 }
5488
5489 inode = file_inode(filp);
4736 5490
4737 spin_lock(&inode->i_lock); 5491 spin_lock(&inode->i_lock);
4738 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { 5492 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4739 if ((*flpp)->fl_owner == (fl_owner_t)lowner) { 5493 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4740 status = 1; 5494 status = true;
4741 goto out; 5495 break;
4742 } 5496 }
4743 } 5497 }
4744out:
4745 spin_unlock(&inode->i_lock); 5498 spin_unlock(&inode->i_lock);
5499 fput(filp);
4746 return status; 5500 return status;
4747} 5501}
4748 5502
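check_for_locks() now resolves a struct file from the nfs4_file, walks the inode's lock list under i_lock, and puts the file when done. A simplified userspace analogue of the walk-and-compare step follows; the types are stand-ins for the kernel structures (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct file_lock {
        const void *fl_owner;
        struct file_lock *fl_next;
};

struct demo_inode {
        pthread_mutex_t i_lock;
        struct file_lock *i_flock;
};

/* Walk the inode's lock list under the lock; true if "owner" holds any. */
static bool check_for_locks(struct demo_inode *inode, const void *owner)
{
        struct file_lock *fl;
        bool held = false;

        pthread_mutex_lock(&inode->i_lock);
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (fl->fl_owner == owner) {
                        held = true;
                        break;
                }
        }
        pthread_mutex_unlock(&inode->i_lock);
        return held;
}

int main(void)
{
        int owner_a, owner_b, owner_c;
        struct file_lock l2 = { .fl_owner = &owner_b, .fl_next = NULL };
        struct file_lock l1 = { .fl_owner = &owner_a, .fl_next = &l2 };
        struct demo_inode inode;

        pthread_mutex_init(&inode.i_lock, NULL);
        inode.i_flock = &l1;
        printf("owner_a holds locks: %d\n", check_for_locks(&inode, &owner_a));
        printf("owner_c holds locks: %d\n", check_for_locks(&inode, &owner_c));
        pthread_mutex_destroy(&inode.i_lock);
        return 0;
}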
@@ -4753,53 +5507,46 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
4753{ 5507{
4754 clientid_t *clid = &rlockowner->rl_clientid; 5508 clientid_t *clid = &rlockowner->rl_clientid;
4755 struct nfs4_stateowner *sop; 5509 struct nfs4_stateowner *sop;
4756 struct nfs4_lockowner *lo; 5510 struct nfs4_lockowner *lo = NULL;
4757 struct nfs4_ol_stateid *stp; 5511 struct nfs4_ol_stateid *stp;
4758 struct xdr_netobj *owner = &rlockowner->rl_owner; 5512 struct xdr_netobj *owner = &rlockowner->rl_owner;
4759 struct list_head matches; 5513 unsigned int hashval = ownerstr_hashval(owner);
4760 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4761 __be32 status; 5514 __be32 status;
4762 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 5515 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5516 struct nfs4_client *clp;
4763 5517
4764 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 5518 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4765 clid->cl_boot, clid->cl_id); 5519 clid->cl_boot, clid->cl_id);
4766 5520
4767 nfs4_lock_state(); 5521 status = lookup_clientid(clid, cstate, nn);
4768
4769 status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
4770 if (status) 5522 if (status)
4771 goto out; 5523 return status;
4772 5524
4773 status = nfserr_locks_held; 5525 clp = cstate->clp;
4774 INIT_LIST_HEAD(&matches); 5526 /* Find the matching lock stateowner */
5527 spin_lock(&clp->cl_lock);
5528 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
5529 so_strhash) {
4775 5530
4776 list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) { 5531 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
4777 if (sop->so_is_open_owner)
4778 continue; 5532 continue;
4779 if (!same_owner_str(sop, owner, clid)) 5533
4780 continue; 5534 /* see if there are still any locks associated with it */
4781 list_for_each_entry(stp, &sop->so_stateids, 5535 lo = lockowner(sop);
4782 st_perstateowner) { 5536 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
4783 lo = lockowner(sop); 5537 if (check_for_locks(stp->st_stid.sc_file, lo)) {
4784 if (check_for_locks(stp->st_file, lo)) 5538 status = nfserr_locks_held;
4785 goto out; 5539 spin_unlock(&clp->cl_lock);
4786 list_add(&lo->lo_list, &matches); 5540 return status;
5541 }
4787 } 5542 }
5543
5544 atomic_inc(&sop->so_count);
5545 break;
4788 } 5546 }
4789 /* Clients probably won't expect us to return with some (but not all) 5547 spin_unlock(&clp->cl_lock);
4790 * of the lockowner state released; so don't release any until all 5548 if (lo)
4791 * have been checked. */
4792 status = nfs_ok;
4793 while (!list_empty(&matches)) {
4794 lo = list_entry(matches.next, struct nfs4_lockowner,
4795 lo_list);
4796 /* unhash_stateowner deletes so_perclient only
4797 * for openowners. */
4798 list_del(&lo->lo_list);
4799 release_lockowner(lo); 5549 release_lockowner(lo);
4800 }
4801out:
4802 nfs4_unlock_state();
4803 return status; 5550 return status;
4804} 5551}
4805 5552
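RELEASE_LOCKOWNER now searches the per-client owner-string hash under cl_lock, skipping openowners and bailing out with nfserr_locks_held as soon as any stateid still has locks. A sketch of the bucket scan by opaque owner string, with an arbitrary demo hash (the hash function and sizes are not the kernel's):

#include <stdio.h>
#include <string.h>

#define OWNER_HASH_SIZE 8               /* arbitrary demo size */

struct stateowner {
        const char *owner;              /* opaque owner string */
        size_t owner_len;
        int is_open_owner;
        struct stateowner *next;        /* bucket chaining */
};

static unsigned int ownerstr_hash(const char *owner, size_t len)
{
        unsigned int h = 0;

        while (len--)
                h = h * 31 + (unsigned char)*owner++;
        return h & (OWNER_HASH_SIZE - 1);
}

/* Scan one bucket, skipping openowners, comparing the raw owner bytes. */
static struct stateowner *find_lockowner(struct stateowner **tbl,
                                         const char *owner, size_t len)
{
        struct stateowner *sop;

        for (sop = tbl[ownerstr_hash(owner, len)]; sop; sop = sop->next) {
                if (sop->is_open_owner)
                        continue;
                if (sop->owner_len == len && !memcmp(sop->owner, owner, len))
                        return sop;
        }
        return NULL;
}

int main(void)
{
        struct stateowner *tbl[OWNER_HASH_SIZE] = { 0 };
        struct stateowner lo = { .owner = "lock-owner-1", .owner_len = 12 };
        unsigned int bucket = ownerstr_hash(lo.owner, lo.owner_len);

        lo.next = tbl[bucket];
        tbl[bucket] = &lo;
        printf("found: %s\n", find_lockowner(tbl, "lock-owner-1", 12)->owner);
        return 0;
}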
@@ -4887,34 +5634,123 @@ nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
4887* Called from OPEN. Look for clientid in reclaim list. 5634* Called from OPEN. Look for clientid in reclaim list.
4888*/ 5635*/
4889__be32 5636__be32
4890nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn) 5637nfs4_check_open_reclaim(clientid_t *clid,
5638 struct nfsd4_compound_state *cstate,
5639 struct nfsd_net *nn)
4891{ 5640{
4892 struct nfs4_client *clp; 5641 __be32 status;
4893 5642
4894 /* find clientid in conf_id_hashtbl */ 5643 /* find clientid in conf_id_hashtbl */
4895 clp = find_confirmed_client(clid, sessions, nn); 5644 status = lookup_clientid(clid, cstate, nn);
4896 if (clp == NULL) 5645 if (status)
4897 return nfserr_reclaim_bad; 5646 return nfserr_reclaim_bad;
4898 5647
4899 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok; 5648 if (nfsd4_client_record_check(cstate->clp))
5649 return nfserr_reclaim_bad;
5650
5651 return nfs_ok;
4900} 5652}
4901 5653
4902#ifdef CONFIG_NFSD_FAULT_INJECTION 5654#ifdef CONFIG_NFSD_FAULT_INJECTION
5655static inline void
5656put_client(struct nfs4_client *clp)
5657{
5658 atomic_dec(&clp->cl_refcount);
5659}
4903 5660
4904u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) 5661static struct nfs4_client *
5662nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
4905{ 5663{
4906 if (mark_client_expired(clp)) 5664 struct nfs4_client *clp;
4907 return 0; 5665 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
4908 expire_client(clp); 5666 nfsd_net_id);
4909 return 1; 5667
5668 if (!nfsd_netns_ready(nn))
5669 return NULL;
5670
5671 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5672 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5673 return clp;
5674 }
5675 return NULL;
4910} 5676}
4911 5677
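nfsd_find_client() matches a client purely by memcmp() of the supplied bytes against the stored sockaddr_storage, and the print helper formats the address much like rpc_ntop() does. A small standalone illustration using a made-up address:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Clients are matched by raw byte comparison against the stored address. */
static int same_addr(const struct sockaddr_storage *stored,
                     const void *addr, size_t addr_size)
{
        return memcmp(stored, addr, addr_size) == 0;
}

int main(void)
{
        struct sockaddr_in probe = { 0 };
        struct sockaddr_storage stored = { 0 };
        char buf[INET6_ADDRSTRLEN];

        probe.sin_family = AF_INET;
        probe.sin_port = htons(2049);
        inet_pton(AF_INET, "192.0.2.10", &probe.sin_addr);
        memcpy(&stored, &probe, sizeof(probe));

        /* analogue of rpc_ntop() in the "print clients" helper */
        inet_ntop(AF_INET, &probe.sin_addr, buf, sizeof(buf));
        printf("NFS Client: %s match=%d\n", buf,
               same_addr(&stored, &probe, sizeof(probe)));
        return 0;
}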
4912u64 nfsd_print_client(struct nfs4_client *clp, u64 num) 5678u64
5679nfsd_inject_print_clients(void)
4913{ 5680{
5681 struct nfs4_client *clp;
5682 u64 count = 0;
5683 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5684 nfsd_net_id);
4914 char buf[INET6_ADDRSTRLEN]; 5685 char buf[INET6_ADDRSTRLEN];
4915 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf)); 5686
4916 printk(KERN_INFO "NFS Client: %s\n", buf); 5687 if (!nfsd_netns_ready(nn))
4917 return 1; 5688 return 0;
5689
5690 spin_lock(&nn->client_lock);
5691 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5692 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5693 pr_info("NFS Client: %s\n", buf);
5694 ++count;
5695 }
5696 spin_unlock(&nn->client_lock);
5697
5698 return count;
5699}
5700
5701u64
5702nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
5703{
5704 u64 count = 0;
5705 struct nfs4_client *clp;
5706 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5707 nfsd_net_id);
5708
5709 if (!nfsd_netns_ready(nn))
5710 return count;
5711
5712 spin_lock(&nn->client_lock);
5713 clp = nfsd_find_client(addr, addr_size);
5714 if (clp) {
5715 if (mark_client_expired_locked(clp) == nfs_ok)
5716 ++count;
5717 else
5718 clp = NULL;
5719 }
5720 spin_unlock(&nn->client_lock);
5721
5722 if (clp)
5723 expire_client(clp);
5724
5725 return count;
5726}
5727
5728u64
5729nfsd_inject_forget_clients(u64 max)
5730{
5731 u64 count = 0;
5732 struct nfs4_client *clp, *next;
5733 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5734 nfsd_net_id);
5735 LIST_HEAD(reaplist);
5736
5737 if (!nfsd_netns_ready(nn))
5738 return count;
5739
5740 spin_lock(&nn->client_lock);
5741 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5742 if (mark_client_expired_locked(clp) == nfs_ok) {
5743 list_add(&clp->cl_lru, &reaplist);
5744 if (max != 0 && ++count >= max)
5745 break;
5746 }
5747 }
5748 spin_unlock(&nn->client_lock);
5749
5750 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
5751 expire_client(clp);
5752
5753 return count;
4918} 5754}
4919 5755
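nfsd_inject_forget_clients() shows the recurring two-phase teardown in this series: mark/unhash eligible entries and move them onto a private reaplist while holding client_lock, then do the expensive expiry with the lock dropped. A userspace sketch of that collect-then-reap shape, with simplified types (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct client {
        const char *name;
        int expired;
        struct client *next;
};

static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: under the lock, unhash expirable clients onto a private list. */
static struct client *collect_expirable(struct client **lru)
{
        struct client *reaplist = NULL, **pp, *clp;

        pthread_mutex_lock(&client_lock);
        pp = lru;
        while ((clp = *pp) != NULL) {
                if (clp->expired) {
                        *pp = clp->next;        /* unhash under the lock */
                        clp->next = reaplist;
                        reaplist = clp;
                } else {
                        pp = &clp->next;
                }
        }
        pthread_mutex_unlock(&client_lock);
        return reaplist;
}

int main(void)
{
        struct client c2 = { "live", 0, NULL };
        struct client c1 = { "stale", 1, &c2 };
        struct client *lru = &c1, *reap;

        /* Phase 2: the expensive teardown runs with the lock dropped. */
        for (reap = collect_expirable(&lru); reap; reap = reap->next)
                printf("expiring %s outside the lock\n", reap->name);
        return 0;
}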
4920static void nfsd_print_count(struct nfs4_client *clp, unsigned int count, 5756static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
@@ -4925,158 +5761,484 @@ static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
4925 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type); 5761 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
4926} 5762}
4927 5763
4928static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *)) 5764static void
5765nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
5766 struct list_head *collect)
5767{
5768 struct nfs4_client *clp = lst->st_stid.sc_client;
5769 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5770 nfsd_net_id);
5771
5772 if (!collect)
5773 return;
5774
5775 lockdep_assert_held(&nn->client_lock);
5776 atomic_inc(&clp->cl_refcount);
5777 list_add(&lst->st_locks, collect);
5778}
5779
5780static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
5781 struct list_head *collect,
5782 void (*func)(struct nfs4_ol_stateid *))
4929{ 5783{
4930 struct nfs4_openowner *oop; 5784 struct nfs4_openowner *oop;
4931 struct nfs4_lockowner *lop, *lo_next;
4932 struct nfs4_ol_stateid *stp, *st_next; 5785 struct nfs4_ol_stateid *stp, *st_next;
5786 struct nfs4_ol_stateid *lst, *lst_next;
4933 u64 count = 0; 5787 u64 count = 0;
4934 5788
5789 spin_lock(&clp->cl_lock);
4935 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) { 5790 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
4936 list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) { 5791 list_for_each_entry_safe(stp, st_next,
4937 list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) { 5792 &oop->oo_owner.so_stateids, st_perstateowner) {
4938 if (func) 5793 list_for_each_entry_safe(lst, lst_next,
4939 func(lop); 5794 &stp->st_locks, st_locks) {
4940 if (++count == max) 5795 if (func) {
4941 return count; 5796 func(lst);
5797 nfsd_inject_add_lock_to_list(lst,
5798 collect);
5799 }
5800 ++count;
5801 /*
5802 * Despite the fact that these functions deal
5803 * with 64-bit integers for "count", we must
5804 * ensure that it doesn't blow up the
5805 * clp->cl_refcount. Throw a warning if we
5806 * start to approach INT_MAX here.
5807 */
5808 WARN_ON_ONCE(count == (INT_MAX / 2));
5809 if (count == max)
5810 goto out;
4942 } 5811 }
4943 } 5812 }
4944 } 5813 }
5814out:
5815 spin_unlock(&clp->cl_lock);
4945 5816
4946 return count; 5817 return count;
4947} 5818}
4948 5819
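nfsd_foreach_client_lock() above bounds the nested openowner → open stateid → lock stateid walk by "max" and warns long before the per-client refcount could creep toward INT_MAX. A compact sketch of a bounded nested walk with the same guard, over made-up containers:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct owner {
        int nlocks;             /* stand-in for a list of lock stateids */
};

/* Visit at most "max" locks across all owners; max == 0 means unlimited. */
static unsigned long long visit_locks(const struct owner *owners, int nowners,
                                      unsigned long long max)
{
        unsigned long long count = 0;

        for (int i = 0; i < nowners; i++) {
                for (int j = 0; j < owners[i].nlocks; j++) {
                        ++count;
                        /* analogue of WARN_ON_ONCE(count == INT_MAX / 2) */
                        assert(count != (unsigned long long)(INT_MAX / 2));
                        if (count == max)
                                return count;
                }
        }
        return count;
}

int main(void)
{
        struct owner owners[] = { { 3 }, { 5 }, { 2 } };

        printf("visited %llu (unlimited)\n", visit_locks(owners, 3, 0));
        printf("visited %llu (max 4)\n", visit_locks(owners, 3, 4));
        return 0;
}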
4949u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max) 5820static u64
5821nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
5822 u64 max)
4950{ 5823{
4951 return nfsd_foreach_client_lock(clp, max, release_lockowner); 5824 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
4952} 5825}
4953 5826
4954u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max) 5827static u64
5828nfsd_print_client_locks(struct nfs4_client *clp)
4955{ 5829{
4956 u64 count = nfsd_foreach_client_lock(clp, max, NULL); 5830 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
4957 nfsd_print_count(clp, count, "locked files"); 5831 nfsd_print_count(clp, count, "locked files");
4958 return count; 5832 return count;
4959} 5833}
4960 5834
4961static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *)) 5835u64
5836nfsd_inject_print_locks(void)
5837{
5838 struct nfs4_client *clp;
5839 u64 count = 0;
5840 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5841 nfsd_net_id);
5842
5843 if (!nfsd_netns_ready(nn))
5844 return 0;
5845
5846 spin_lock(&nn->client_lock);
5847 list_for_each_entry(clp, &nn->client_lru, cl_lru)
5848 count += nfsd_print_client_locks(clp);
5849 spin_unlock(&nn->client_lock);
5850
5851 return count;
5852}
5853
5854static void
5855nfsd_reap_locks(struct list_head *reaplist)
5856{
5857 struct nfs4_client *clp;
5858 struct nfs4_ol_stateid *stp, *next;
5859
5860 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
5861 list_del_init(&stp->st_locks);
5862 clp = stp->st_stid.sc_client;
5863 nfs4_put_stid(&stp->st_stid);
5864 put_client(clp);
5865 }
5866}
5867
5868u64
5869nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
5870{
5871 unsigned int count = 0;
5872 struct nfs4_client *clp;
5873 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5874 nfsd_net_id);
5875 LIST_HEAD(reaplist);
5876
5877 if (!nfsd_netns_ready(nn))
5878 return count;
5879
5880 spin_lock(&nn->client_lock);
5881 clp = nfsd_find_client(addr, addr_size);
5882 if (clp)
5883 count = nfsd_collect_client_locks(clp, &reaplist, 0);
5884 spin_unlock(&nn->client_lock);
5885 nfsd_reap_locks(&reaplist);
5886 return count;
5887}
5888
5889u64
5890nfsd_inject_forget_locks(u64 max)
5891{
5892 u64 count = 0;
5893 struct nfs4_client *clp;
5894 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5895 nfsd_net_id);
5896 LIST_HEAD(reaplist);
5897
5898 if (!nfsd_netns_ready(nn))
5899 return count;
5900
5901 spin_lock(&nn->client_lock);
5902 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5903 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
5904 if (max != 0 && count >= max)
5905 break;
5906 }
5907 spin_unlock(&nn->client_lock);
5908 nfsd_reap_locks(&reaplist);
5909 return count;
5910}
5911
5912static u64
5913nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
5914 struct list_head *collect,
5915 void (*func)(struct nfs4_openowner *))
4962{ 5916{
4963 struct nfs4_openowner *oop, *next; 5917 struct nfs4_openowner *oop, *next;
5918 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5919 nfsd_net_id);
4964 u64 count = 0; 5920 u64 count = 0;
4965 5921
5922 lockdep_assert_held(&nn->client_lock);
5923
5924 spin_lock(&clp->cl_lock);
4966 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) { 5925 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
4967 if (func) 5926 if (func) {
4968 func(oop); 5927 func(oop);
4969 if (++count == max) 5928 if (collect) {
5929 atomic_inc(&clp->cl_refcount);
5930 list_add(&oop->oo_perclient, collect);
5931 }
5932 }
5933 ++count;
5934 /*
5935 * Despite the fact that these functions deal with
5936 * 64-bit integers for "count", we must ensure that
5937 * it doesn't blow up the clp->cl_refcount. Throw a
5938 * warning if we start to approach INT_MAX here.
5939 */
5940 WARN_ON_ONCE(count == (INT_MAX / 2));
5941 if (count == max)
4970 break; 5942 break;
4971 } 5943 }
5944 spin_unlock(&clp->cl_lock);
4972 5945
4973 return count; 5946 return count;
4974} 5947}
4975 5948
4976u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max) 5949static u64
5950nfsd_print_client_openowners(struct nfs4_client *clp)
4977{ 5951{
4978 return nfsd_foreach_client_open(clp, max, release_openowner); 5952 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
5953
5954 nfsd_print_count(clp, count, "openowners");
5955 return count;
4979} 5956}
4980 5957
4981u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max) 5958static u64
5959nfsd_collect_client_openowners(struct nfs4_client *clp,
5960 struct list_head *collect, u64 max)
4982{ 5961{
4983 u64 count = nfsd_foreach_client_open(clp, max, NULL); 5962 return nfsd_foreach_client_openowner(clp, max, collect,
4984 nfsd_print_count(clp, count, "open files"); 5963 unhash_openowner_locked);
4985 return count;
4986} 5964}
4987 5965
4988static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, 5966u64
4989 struct list_head *victims) 5967nfsd_inject_print_openowners(void)
4990{ 5968{
4991 struct nfs4_delegation *dp, *next; 5969 struct nfs4_client *clp;
4992 u64 count = 0; 5970 u64 count = 0;
5971 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
5972 nfsd_net_id);
5973
5974 if (!nfsd_netns_ready(nn))
5975 return 0;
5976
5977 spin_lock(&nn->client_lock);
5978 list_for_each_entry(clp, &nn->client_lru, cl_lru)
5979 count += nfsd_print_client_openowners(clp);
5980 spin_unlock(&nn->client_lock);
4993 5981
4994 lockdep_assert_held(&state_lock);
4995 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
4996 if (victims)
4997 list_move(&dp->dl_recall_lru, victims);
4998 if (++count == max)
4999 break;
5000 }
5001 return count; 5982 return count;
5002} 5983}
5003 5984
5004u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max) 5985static void
5986nfsd_reap_openowners(struct list_head *reaplist)
5005{ 5987{
5006 struct nfs4_delegation *dp, *next; 5988 struct nfs4_client *clp;
5007 LIST_HEAD(victims); 5989 struct nfs4_openowner *oop, *next;
5008 u64 count;
5009 5990
5010 spin_lock(&state_lock); 5991 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
5011 count = nfsd_find_all_delegations(clp, max, &victims); 5992 list_del_init(&oop->oo_perclient);
5012 spin_unlock(&state_lock); 5993 clp = oop->oo_owner.so_client;
5994 release_openowner(oop);
5995 put_client(clp);
5996 }
5997}
5013 5998
5014 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) 5999u64
5015 revoke_delegation(dp); 6000nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6001 size_t addr_size)
6002{
6003 unsigned int count = 0;
6004 struct nfs4_client *clp;
6005 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6006 nfsd_net_id);
6007 LIST_HEAD(reaplist);
5016 6008
6009 if (!nfsd_netns_ready(nn))
6010 return count;
6011
6012 spin_lock(&nn->client_lock);
6013 clp = nfsd_find_client(addr, addr_size);
6014 if (clp)
6015 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6016 spin_unlock(&nn->client_lock);
6017 nfsd_reap_openowners(&reaplist);
5017 return count; 6018 return count;
5018} 6019}
5019 6020
5020u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max) 6021u64
6022nfsd_inject_forget_openowners(u64 max)
5021{ 6023{
5022 struct nfs4_delegation *dp, *next; 6024 u64 count = 0;
5023 LIST_HEAD(victims); 6025 struct nfs4_client *clp;
5024 u64 count; 6026 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6027 nfsd_net_id);
6028 LIST_HEAD(reaplist);
5025 6029
5026 spin_lock(&state_lock); 6030 if (!nfsd_netns_ready(nn))
5027 count = nfsd_find_all_delegations(clp, max, &victims); 6031 return count;
5028 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
5029 nfsd_break_one_deleg(dp);
5030 spin_unlock(&state_lock);
5031 6032
6033 spin_lock(&nn->client_lock);
6034 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6035 count += nfsd_collect_client_openowners(clp, &reaplist,
6036 max - count);
6037 if (max != 0 && count >= max)
6038 break;
6039 }
6040 spin_unlock(&nn->client_lock);
6041 nfsd_reap_openowners(&reaplist);
5032 return count; 6042 return count;
5033} 6043}
5034 6044
5035u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max) 6045static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6046 struct list_head *victims)
5036{ 6047{
6048 struct nfs4_delegation *dp, *next;
6049 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6050 nfsd_net_id);
5037 u64 count = 0; 6051 u64 count = 0;
5038 6052
6053 lockdep_assert_held(&nn->client_lock);
6054
5039 spin_lock(&state_lock); 6055 spin_lock(&state_lock);
5040 count = nfsd_find_all_delegations(clp, max, NULL); 6056 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6057 if (victims) {
6058 /*
6059 * It's not safe to mess with delegations that have a
6060 * non-zero dl_time. They might have already been broken
6061 * and could be processed by the laundromat outside of
6062 * the state_lock. Just leave them be.
6063 */
6064 if (dp->dl_time != 0)
6065 continue;
6066
6067 atomic_inc(&clp->cl_refcount);
6068 unhash_delegation_locked(dp);
6069 list_add(&dp->dl_recall_lru, victims);
6070 }
6071 ++count;
6072 /*
6073 * Despite the fact that these functions deal with
6074 * 64-bit integers for "count", we must ensure that
6075 * it doesn't blow up the clp->cl_refcount. Throw a
6076 * warning if we start to approach INT_MAX here.
6077 */
6078 WARN_ON_ONCE(count == (INT_MAX / 2));
6079 if (count == max)
6080 break;
6081 }
5041 spin_unlock(&state_lock); 6082 spin_unlock(&state_lock);
6083 return count;
6084}
6085
6086static u64
6087nfsd_print_client_delegations(struct nfs4_client *clp)
6088{
6089 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
5042 6090
5043 nfsd_print_count(clp, count, "delegations"); 6091 nfsd_print_count(clp, count, "delegations");
5044 return count; 6092 return count;
5045} 6093}
5046 6094
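When collecting victims, nfsd_find_all_delegations() above leaves any delegation with a non-zero dl_time alone, since the laundromat may already be processing it. A tiny sketch of that "collect only idle entries" filter over stand-in structures:

#include <stdio.h>

struct delegation {
        const char *name;
        long dl_time;           /* 0 == idle, safe to collect */
};

/* Collect only idle entries; anything with a timestamp is left alone. */
static int collect_idle(struct delegation *dels, int n,
                        const struct delegation **victims, int max)
{
        int count = 0;

        for (int i = 0; i < n; i++) {
                if (dels[i].dl_time != 0)
                        continue;       /* possibly being recalled; skip */
                victims[count] = &dels[i];
                if (++count == max)
                        break;
        }
        return count;
}

int main(void)
{
        struct delegation dels[] = {
                { "idle-1", 0 }, { "breaking", 12345 }, { "idle-2", 0 },
        };
        const struct delegation *victims[3];
        int n = collect_idle(dels, 3, victims, 3);

        for (int i = 0; i < n; i++)
                printf("collected %s\n", victims[i]->name);
        return 0;
}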
5047u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64)) 6095u64
6096nfsd_inject_print_delegations(void)
5048{ 6097{
5049 struct nfs4_client *clp, *next; 6098 struct nfs4_client *clp;
5050 u64 count = 0; 6099 u64 count = 0;
5051 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); 6100 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6101 nfsd_net_id);
5052 6102
5053 if (!nfsd_netns_ready(nn)) 6103 if (!nfsd_netns_ready(nn))
5054 return 0; 6104 return 0;
5055 6105
5056 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) { 6106 spin_lock(&nn->client_lock);
5057 count += func(clp, max - count); 6107 list_for_each_entry(clp, &nn->client_lru, cl_lru)
5058 if ((max != 0) && (count >= max)) 6108 count += nfsd_print_client_delegations(clp);
5059 break; 6109 spin_unlock(&nn->client_lock);
6110
6111 return count;
6112}
6113
6114static void
6115nfsd_forget_delegations(struct list_head *reaplist)
6116{
6117 struct nfs4_client *clp;
6118 struct nfs4_delegation *dp, *next;
6119
6120 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6121 list_del_init(&dp->dl_recall_lru);
6122 clp = dp->dl_stid.sc_client;
6123 revoke_delegation(dp);
6124 put_client(clp);
5060 } 6125 }
6126}
5061 6127
6128u64
6129nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6130 size_t addr_size)
6131{
6132 u64 count = 0;
6133 struct nfs4_client *clp;
6134 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6135 nfsd_net_id);
6136 LIST_HEAD(reaplist);
6137
6138 if (!nfsd_netns_ready(nn))
6139 return count;
6140
6141 spin_lock(&nn->client_lock);
6142 clp = nfsd_find_client(addr, addr_size);
6143 if (clp)
6144 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6145 spin_unlock(&nn->client_lock);
6146
6147 nfsd_forget_delegations(&reaplist);
5062 return count; 6148 return count;
5063} 6149}
5064 6150
5065struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size) 6151u64
6152nfsd_inject_forget_delegations(u64 max)
5066{ 6153{
6154 u64 count = 0;
5067 struct nfs4_client *clp; 6155 struct nfs4_client *clp;
5068 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id); 6156 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6157 nfsd_net_id);
6158 LIST_HEAD(reaplist);
5069 6159
5070 if (!nfsd_netns_ready(nn)) 6160 if (!nfsd_netns_ready(nn))
5071 return NULL; 6161 return count;
5072 6162
6163 spin_lock(&nn->client_lock);
5073 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6164 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5074 if (memcmp(&clp->cl_addr, addr, addr_size) == 0) 6165 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
5075 return clp; 6166 if (max != 0 && count >= max)
6167 break;
5076 } 6168 }
5077 return NULL; 6169 spin_unlock(&nn->client_lock);
6170 nfsd_forget_delegations(&reaplist);
6171 return count;
5078} 6172}
5079 6173
6174static void
6175nfsd_recall_delegations(struct list_head *reaplist)
6176{
6177 struct nfs4_client *clp;
6178 struct nfs4_delegation *dp, *next;
6179
6180 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6181 list_del_init(&dp->dl_recall_lru);
6182 clp = dp->dl_stid.sc_client;
6183 /*
6184 * We skipped all entries that had a zero dl_time before,
6185 * so we can now reset the dl_time back to 0. If a delegation
6186 * break comes in now, then it won't make any difference since
6187 * we're recalling it either way.
6188 */
6189 spin_lock(&state_lock);
6190 dp->dl_time = 0;
6191 spin_unlock(&state_lock);
6192 nfsd_break_one_deleg(dp);
6193 put_client(clp);
6194 }
6195}
6196
6197u64
6198nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
6199 size_t addr_size)
6200{
6201 u64 count = 0;
6202 struct nfs4_client *clp;
6203 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6204 nfsd_net_id);
6205 LIST_HEAD(reaplist);
6206
6207 if (!nfsd_netns_ready(nn))
6208 return count;
6209
6210 spin_lock(&nn->client_lock);
6211 clp = nfsd_find_client(addr, addr_size);
6212 if (clp)
6213 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6214 spin_unlock(&nn->client_lock);
6215
6216 nfsd_recall_delegations(&reaplist);
6217 return count;
6218}
6219
6220u64
6221nfsd_inject_recall_delegations(u64 max)
6222{
6223 u64 count = 0;
6224 struct nfs4_client *clp, *next;
6225 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6226 nfsd_net_id);
6227 LIST_HEAD(reaplist);
6228
6229 if (!nfsd_netns_ready(nn))
6230 return count;
6231
6232 spin_lock(&nn->client_lock);
6233 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6234 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6235 if (max != 0 && ++count >= max)
6236 break;
6237 }
6238 spin_unlock(&nn->client_lock);
6239 nfsd_recall_delegations(&reaplist);
6240 return count;
6241}
5080#endif /* CONFIG_NFSD_FAULT_INJECTION */ 6242#endif /* CONFIG_NFSD_FAULT_INJECTION */
5081 6243
5082/* 6244/*
@@ -5113,14 +6275,6 @@ static int nfs4_state_create_net(struct net *net)
5113 CLIENT_HASH_SIZE, GFP_KERNEL); 6275 CLIENT_HASH_SIZE, GFP_KERNEL);
5114 if (!nn->unconf_id_hashtbl) 6276 if (!nn->unconf_id_hashtbl)
5115 goto err_unconf_id; 6277 goto err_unconf_id;
5116 nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
5117 OWNER_HASH_SIZE, GFP_KERNEL);
5118 if (!nn->ownerstr_hashtbl)
5119 goto err_ownerstr;
5120 nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) *
5121 LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL);
5122 if (!nn->lockowner_ino_hashtbl)
5123 goto err_lockowner_ino;
5124 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) * 6278 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
5125 SESSION_HASH_SIZE, GFP_KERNEL); 6279 SESSION_HASH_SIZE, GFP_KERNEL);
5126 if (!nn->sessionid_hashtbl) 6280 if (!nn->sessionid_hashtbl)
@@ -5130,10 +6284,6 @@ static int nfs4_state_create_net(struct net *net)
5130 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 6284 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
5131 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 6285 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
5132 } 6286 }
5133 for (i = 0; i < OWNER_HASH_SIZE; i++)
5134 INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
5135 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
5136 INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]);
5137 for (i = 0; i < SESSION_HASH_SIZE; i++) 6287 for (i = 0; i < SESSION_HASH_SIZE; i++)
5138 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 6288 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
5139 nn->conf_name_tree = RB_ROOT; 6289 nn->conf_name_tree = RB_ROOT;
@@ -5149,10 +6299,6 @@ static int nfs4_state_create_net(struct net *net)
5149 return 0; 6299 return 0;
5150 6300
5151err_sessionid: 6301err_sessionid:
5152 kfree(nn->lockowner_ino_hashtbl);
5153err_lockowner_ino:
5154 kfree(nn->ownerstr_hashtbl);
5155err_ownerstr:
5156 kfree(nn->unconf_id_hashtbl); 6302 kfree(nn->unconf_id_hashtbl);
5157err_unconf_id: 6303err_unconf_id:
5158 kfree(nn->conf_id_hashtbl); 6304 kfree(nn->conf_id_hashtbl);
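The per-net setup keeps the same allocate/initialise/unwind shape even with the owner-string and lockowner-ino tables gone: each remaining table is allocated, its buckets initialised, and a failure frees the earlier allocations through the goto ladder. A minimal userspace sketch of the pattern, with illustrative names and sizes:

#include <stdio.h>
#include <stdlib.h>

#define CLIENT_HASH_SIZE  64            /* demo sizes only */
#define SESSION_HASH_SIZE 128

struct bucket {
        struct bucket *next;            /* stand-in for a list head */
};

struct net_state {
        struct bucket *conf_id_hashtbl;
        struct bucket *sessionid_hashtbl;
};

static int state_create(struct net_state *nn)
{
        int i;

        nn->conf_id_hashtbl = calloc(CLIENT_HASH_SIZE,
                                     sizeof(*nn->conf_id_hashtbl));
        if (!nn->conf_id_hashtbl)
                goto err;
        nn->sessionid_hashtbl = calloc(SESSION_HASH_SIZE,
                                       sizeof(*nn->sessionid_hashtbl));
        if (!nn->sessionid_hashtbl)
                goto err_sessionid;

        for (i = 0; i < CLIENT_HASH_SIZE; i++)
                nn->conf_id_hashtbl[i].next = NULL;     /* INIT_LIST_HEAD analogue */
        for (i = 0; i < SESSION_HASH_SIZE; i++)
                nn->sessionid_hashtbl[i].next = NULL;
        return 0;

err_sessionid:
        free(nn->conf_id_hashtbl);      /* unwind in reverse order */
err:
        return -1;
}

int main(void)
{
        struct net_state nn;

        if (state_create(&nn))
                return 1;
        printf("per-net state tables allocated\n");
        free(nn.sessionid_hashtbl);
        free(nn.conf_id_hashtbl);
        return 0;
}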
@@ -5182,8 +6328,6 @@ nfs4_state_destroy_net(struct net *net)
5182 } 6328 }
5183 6329
5184 kfree(nn->sessionid_hashtbl); 6330 kfree(nn->sessionid_hashtbl);
5185 kfree(nn->lockowner_ino_hashtbl);
5186 kfree(nn->ownerstr_hashtbl);
5187 kfree(nn->unconf_id_hashtbl); 6331 kfree(nn->unconf_id_hashtbl);
5188 kfree(nn->conf_id_hashtbl); 6332 kfree(nn->conf_id_hashtbl);
5189 put_net(net); 6333 put_net(net);
@@ -5247,22 +6391,22 @@ nfs4_state_shutdown_net(struct net *net)
5247 cancel_delayed_work_sync(&nn->laundromat_work); 6391 cancel_delayed_work_sync(&nn->laundromat_work);
5248 locks_end_grace(&nn->nfsd4_manager); 6392 locks_end_grace(&nn->nfsd4_manager);
5249 6393
5250 nfs4_lock_state();
5251 INIT_LIST_HEAD(&reaplist); 6394 INIT_LIST_HEAD(&reaplist);
5252 spin_lock(&state_lock); 6395 spin_lock(&state_lock);
5253 list_for_each_safe(pos, next, &nn->del_recall_lru) { 6396 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5254 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6397 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5255 list_move(&dp->dl_recall_lru, &reaplist); 6398 unhash_delegation_locked(dp);
6399 list_add(&dp->dl_recall_lru, &reaplist);
5256 } 6400 }
5257 spin_unlock(&state_lock); 6401 spin_unlock(&state_lock);
5258 list_for_each_safe(pos, next, &reaplist) { 6402 list_for_each_safe(pos, next, &reaplist) {
5259 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6403 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5260 destroy_delegation(dp); 6404 list_del_init(&dp->dl_recall_lru);
6405 nfs4_put_stid(&dp->dl_stid);
5261 } 6406 }
5262 6407
5263 nfsd4_client_tracking_exit(net); 6408 nfsd4_client_tracking_exit(net);
5264 nfs4_state_destroy_net(net); 6409 nfs4_state_destroy_net(net);
5265 nfs4_unlock_state();
5266} 6410}
5267 6411
5268void 6412void