Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r--  fs/nfsd/nfs4state.c  1117
1 file changed, 677 insertions, 440 deletions
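The hunks below rework how nfsd sizes the NFSv4.1 session reply cache: the per-slot cache size is clamped first, then the slot count is cut down to fit the global DRC budget (see nfsd4_sanitize_slot_size() and nfsd4_get_drc_mem() in the new code). A rough user-space sketch of that arithmetic, using made-up stand-ins for the kernel's NFSD_* limits and omitting the kernel's locking, might look like:

#include <stdio.h>

/* Illustrative stand-ins for the kernel limits; not the real NFSD_* values. */
#define MIN_HDR_SEQ_SZ        (24 + 12 + 44)
#define SLOT_CACHE_SIZE       1024
#define MAX_SLOTS_PER_SESSION 160
#define MAX_MEM_PER_SESSION   (MAX_SLOTS_PER_SESSION * SLOT_CACHE_SIZE)

static unsigned int drc_max_mem = 1 << 20;   /* pretend global DRC budget */
static unsigned int drc_mem_used;

/* Mirrors nfsd4_sanitize_slot_size(): drop the uncached header, then clamp. */
static unsigned int sanitize_slot_size(unsigned int size)
{
	size -= MIN_HDR_SEQ_SZ;
	return size < SLOT_CACHE_SIZE ? size : SLOT_CACHE_SIZE;
}

/* Mirrors nfsd4_get_drc_mem(): grant only as many slots as the budget allows. */
static unsigned int get_drc_mem(unsigned int slotsize, unsigned int num)
{
	unsigned int avail = drc_max_mem - drc_mem_used;

	if (num > MAX_SLOTS_PER_SESSION)
		num = MAX_SLOTS_PER_SESSION;
	if (avail > MAX_MEM_PER_SESSION)
		avail = MAX_MEM_PER_SESSION;
	if (num > avail / slotsize)
		num = avail / slotsize;
	drc_mem_used += num * slotsize;
	return num;
}

int main(void)
{
	/* Client asks for 64 slots, each caching a 2048-byte response. */
	unsigned int slotsize = sanitize_slot_size(2048);
	unsigned int slots = get_drc_mem(slotsize, 64);

	printf("granted %u slots of %u bytes (drc used %u)\n",
	       slots, slotsize, drc_mem_used);
	return 0;
}

The point of the scheme, as the patch's own comment notes, is that shrinking slot size can break clients, while shrinking the slot count only costs performance, so memory pressure reduces the count, never the size.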
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index cf0d2ffb3c84..e98f3c2e9492 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -33,7 +33,7 @@ | |||
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/file.h> | 35 | #include <linux/file.h> |
36 | #include <linux/smp_lock.h> | 36 | #include <linux/fs.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/namei.h> | 38 | #include <linux/namei.h> |
39 | #include <linux/swap.h> | 39 | #include <linux/swap.h> |
@@ -148,7 +148,7 @@ static struct list_head ownerstr_hashtbl[OWNER_HASH_SIZE]; | |||
148 | /* hash table for nfs4_file */ | 148 | /* hash table for nfs4_file */ |
149 | #define FILE_HASH_BITS 8 | 149 | #define FILE_HASH_BITS 8 |
150 | #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) | 150 | #define FILE_HASH_SIZE (1 << FILE_HASH_BITS) |
151 | #define FILE_HASH_MASK (FILE_HASH_SIZE - 1) | 151 | |
152 | /* hash table for (open)nfs4_stateid */ | 152 | /* hash table for (open)nfs4_stateid */ |
153 | #define STATEID_HASH_BITS 10 | 153 | #define STATEID_HASH_BITS 10 |
154 | #define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS) | 154 | #define STATEID_HASH_SIZE (1 << STATEID_HASH_BITS) |
@@ -207,7 +207,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
207 | { | 207 | { |
208 | struct nfs4_delegation *dp; | 208 | struct nfs4_delegation *dp; |
209 | struct nfs4_file *fp = stp->st_file; | 209 | struct nfs4_file *fp = stp->st_file; |
210 | struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn; | ||
211 | 210 | ||
212 | dprintk("NFSD alloc_init_deleg\n"); | 211 | dprintk("NFSD alloc_init_deleg\n"); |
213 | /* | 212 | /* |
@@ -231,10 +230,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
231 | dp->dl_client = clp; | 230 | dp->dl_client = clp; |
232 | get_nfs4_file(fp); | 231 | get_nfs4_file(fp); |
233 | dp->dl_file = fp; | 232 | dp->dl_file = fp; |
234 | nfs4_file_get_access(fp, O_RDONLY); | ||
235 | dp->dl_flock = NULL; | ||
236 | dp->dl_type = type; | 233 | dp->dl_type = type; |
237 | dp->dl_ident = cb->cb_ident; | ||
238 | dp->dl_stateid.si_boot = boot_time; | 234 | dp->dl_stateid.si_boot = boot_time; |
239 | dp->dl_stateid.si_stateownerid = current_delegid++; | 235 | dp->dl_stateid.si_stateownerid = current_delegid++; |
240 | dp->dl_stateid.si_fileid = 0; | 236 | dp->dl_stateid.si_fileid = 0; |
@@ -242,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
242 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); | 238 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); |
243 | dp->dl_time = 0; | 239 | dp->dl_time = 0; |
244 | atomic_set(&dp->dl_count, 1); | 240 | atomic_set(&dp->dl_count, 1); |
245 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
246 | list_add(&dp->dl_perclnt, &clp->cl_delegations); | ||
247 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); | 241 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); |
248 | return dp; | 242 | return dp; |
249 | } | 243 | } |
@@ -259,32 +253,26 @@ nfs4_put_delegation(struct nfs4_delegation *dp) | |||
259 | } | 253 | } |
260 | } | 254 | } |
261 | 255 | ||
262 | /* Remove the associated file_lock first, then remove the delegation. | 256 | static void nfs4_put_deleg_lease(struct nfs4_file *fp) |
263 | * lease_modify() is called to remove the FS_LEASE file_lock from | ||
264 | * the i_flock list, eventually calling nfsd's lock_manager | ||
265 | * fl_release_callback. | ||
266 | */ | ||
267 | static void | ||
268 | nfs4_close_delegation(struct nfs4_delegation *dp) | ||
269 | { | 257 | { |
270 | struct file *filp = find_readable_file(dp->dl_file); | 258 | if (atomic_dec_and_test(&fp->fi_delegees)) { |
271 | 259 | vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); | |
272 | dprintk("NFSD: close_delegation dp %p\n",dp); | 260 | fp->fi_lease = NULL; |
273 | if (dp->dl_flock) | 261 | fput(fp->fi_deleg_file); |
274 | vfs_setlease(filp, F_UNLCK, &dp->dl_flock); | 262 | fp->fi_deleg_file = NULL; |
275 | nfs4_file_put_access(dp->dl_file, O_RDONLY); | 263 | } |
276 | } | 264 | } |
277 | 265 | ||
278 | /* Called under the state lock. */ | 266 | /* Called under the state lock. */ |
279 | static void | 267 | static void |
280 | unhash_delegation(struct nfs4_delegation *dp) | 268 | unhash_delegation(struct nfs4_delegation *dp) |
281 | { | 269 | { |
282 | list_del_init(&dp->dl_perfile); | ||
283 | list_del_init(&dp->dl_perclnt); | 270 | list_del_init(&dp->dl_perclnt); |
284 | spin_lock(&recall_lock); | 271 | spin_lock(&recall_lock); |
272 | list_del_init(&dp->dl_perfile); | ||
285 | list_del_init(&dp->dl_recall_lru); | 273 | list_del_init(&dp->dl_recall_lru); |
286 | spin_unlock(&recall_lock); | 274 | spin_unlock(&recall_lock); |
287 | nfs4_close_delegation(dp); | 275 | nfs4_put_deleg_lease(dp->dl_file); |
288 | nfs4_put_delegation(dp); | 276 | nfs4_put_delegation(dp); |
289 | } | 277 | } |
290 | 278 | ||
@@ -329,64 +317,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; | |||
329 | static struct list_head client_lru; | 317 | static struct list_head client_lru; |
330 | static struct list_head close_lru; | 318 | static struct list_head close_lru; |
331 | 319 | ||
332 | static void unhash_generic_stateid(struct nfs4_stateid *stp) | ||
333 | { | ||
334 | list_del(&stp->st_hash); | ||
335 | list_del(&stp->st_perfile); | ||
336 | list_del(&stp->st_perstateowner); | ||
337 | } | ||
338 | |||
339 | static void free_generic_stateid(struct nfs4_stateid *stp) | ||
340 | { | ||
341 | put_nfs4_file(stp->st_file); | ||
342 | kmem_cache_free(stateid_slab, stp); | ||
343 | } | ||
344 | |||
345 | static void release_lock_stateid(struct nfs4_stateid *stp) | ||
346 | { | ||
347 | struct file *file; | ||
348 | |||
349 | unhash_generic_stateid(stp); | ||
350 | file = find_any_file(stp->st_file); | ||
351 | if (file) | ||
352 | locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); | ||
353 | free_generic_stateid(stp); | ||
354 | } | ||
355 | |||
356 | static void unhash_lockowner(struct nfs4_stateowner *sop) | ||
357 | { | ||
358 | struct nfs4_stateid *stp; | ||
359 | |||
360 | list_del(&sop->so_idhash); | ||
361 | list_del(&sop->so_strhash); | ||
362 | list_del(&sop->so_perstateid); | ||
363 | while (!list_empty(&sop->so_stateids)) { | ||
364 | stp = list_first_entry(&sop->so_stateids, | ||
365 | struct nfs4_stateid, st_perstateowner); | ||
366 | release_lock_stateid(stp); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | static void release_lockowner(struct nfs4_stateowner *sop) | ||
371 | { | ||
372 | unhash_lockowner(sop); | ||
373 | nfs4_put_stateowner(sop); | ||
374 | } | ||
375 | |||
376 | static void | ||
377 | release_stateid_lockowners(struct nfs4_stateid *open_stp) | ||
378 | { | ||
379 | struct nfs4_stateowner *lock_sop; | ||
380 | |||
381 | while (!list_empty(&open_stp->st_lockowners)) { | ||
382 | lock_sop = list_entry(open_stp->st_lockowners.next, | ||
383 | struct nfs4_stateowner, so_perstateid); | ||
384 | /* list_del(&open_stp->st_lockowners); */ | ||
385 | BUG_ON(lock_sop->so_is_open_owner); | ||
386 | release_lockowner(lock_sop); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | /* | 320 | /* |
391 | * We store the NONE, READ, WRITE, and BOTH bits separately in the | 321 | * We store the NONE, READ, WRITE, and BOTH bits separately in the |
392 | * st_{access,deny}_bmap field of the stateid, in order to track not | 322 | * st_{access,deny}_bmap field of the stateid, in order to track not |
@@ -459,13 +389,74 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp) | |||
459 | return nfs4_access_to_omode(access); | 389 | return nfs4_access_to_omode(access); |
460 | } | 390 | } |
461 | 391 | ||
462 | static void release_open_stateid(struct nfs4_stateid *stp) | 392 | static void unhash_generic_stateid(struct nfs4_stateid *stp) |
393 | { | ||
394 | list_del(&stp->st_hash); | ||
395 | list_del(&stp->st_perfile); | ||
396 | list_del(&stp->st_perstateowner); | ||
397 | } | ||
398 | |||
399 | static void free_generic_stateid(struct nfs4_stateid *stp) | ||
463 | { | 400 | { |
464 | int oflag = nfs4_access_bmap_to_omode(stp); | 401 | int oflag; |
402 | |||
403 | if (stp->st_access_bmap) { | ||
404 | oflag = nfs4_access_bmap_to_omode(stp); | ||
405 | nfs4_file_put_access(stp->st_file, oflag); | ||
406 | } | ||
407 | put_nfs4_file(stp->st_file); | ||
408 | kmem_cache_free(stateid_slab, stp); | ||
409 | } | ||
410 | |||
411 | static void release_lock_stateid(struct nfs4_stateid *stp) | ||
412 | { | ||
413 | struct file *file; | ||
465 | 414 | ||
466 | unhash_generic_stateid(stp); | 415 | unhash_generic_stateid(stp); |
416 | file = find_any_file(stp->st_file); | ||
417 | if (file) | ||
418 | locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); | ||
419 | free_generic_stateid(stp); | ||
420 | } | ||
421 | |||
422 | static void unhash_lockowner(struct nfs4_stateowner *sop) | ||
423 | { | ||
424 | struct nfs4_stateid *stp; | ||
425 | |||
426 | list_del(&sop->so_idhash); | ||
427 | list_del(&sop->so_strhash); | ||
428 | list_del(&sop->so_perstateid); | ||
429 | while (!list_empty(&sop->so_stateids)) { | ||
430 | stp = list_first_entry(&sop->so_stateids, | ||
431 | struct nfs4_stateid, st_perstateowner); | ||
432 | release_lock_stateid(stp); | ||
433 | } | ||
434 | } | ||
435 | |||
436 | static void release_lockowner(struct nfs4_stateowner *sop) | ||
437 | { | ||
438 | unhash_lockowner(sop); | ||
439 | nfs4_put_stateowner(sop); | ||
440 | } | ||
441 | |||
442 | static void | ||
443 | release_stateid_lockowners(struct nfs4_stateid *open_stp) | ||
444 | { | ||
445 | struct nfs4_stateowner *lock_sop; | ||
446 | |||
447 | while (!list_empty(&open_stp->st_lockowners)) { | ||
448 | lock_sop = list_entry(open_stp->st_lockowners.next, | ||
449 | struct nfs4_stateowner, so_perstateid); | ||
450 | /* list_del(&open_stp->st_lockowners); */ | ||
451 | BUG_ON(lock_sop->so_is_open_owner); | ||
452 | release_lockowner(lock_sop); | ||
453 | } | ||
454 | } | ||
455 | |||
456 | static void release_open_stateid(struct nfs4_stateid *stp) | ||
457 | { | ||
458 | unhash_generic_stateid(stp); | ||
467 | release_stateid_lockowners(stp); | 459 | release_stateid_lockowners(stp); |
468 | nfs4_file_put_access(stp->st_file, oflag); | ||
469 | free_generic_stateid(stp); | 460 | free_generic_stateid(stp); |
470 | } | 461 | } |
471 | 462 | ||
@@ -535,171 +526,279 @@ gen_sessionid(struct nfsd4_session *ses) | |||
535 | */ | 526 | */ |
536 | #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) | 527 | #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) |
537 | 528 | ||
529 | static void | ||
530 | free_session_slots(struct nfsd4_session *ses) | ||
531 | { | ||
532 | int i; | ||
533 | |||
534 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) | ||
535 | kfree(ses->se_slots[i]); | ||
536 | } | ||
537 | |||
538 | /* | 538 | /* |
539 | * Give the client the number of ca_maxresponsesize_cached slots it | 539 | * We don't actually need to cache the rpc and session headers, so we |
540 | * requests, of size bounded by NFSD_SLOT_CACHE_SIZE, | 540 | * can allocate a little less for each slot: |
541 | * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more | 541 | */ |
542 | * than NFSD_MAX_SLOTS_PER_SESSION. | 542 | static inline int slot_bytes(struct nfsd4_channel_attrs *ca) |
543 | * | 543 | { |
544 | * If we run out of reserved DRC memory we should (up to a point) | 544 | return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; |
545 | } | ||
546 | |||
547 | static int nfsd4_sanitize_slot_size(u32 size) | ||
548 | { | ||
549 | size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */ | ||
550 | size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE); | ||
551 | |||
552 | return size; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * XXX: If we run out of reserved DRC memory we could (up to a point) | ||
545 | * re-negotiate active sessions and reduce their slot usage to make | 557 | * re-negotiate active sessions and reduce their slot usage to make |
546 | * rooom for new connections. For now we just fail the create session. | 558 | * rooom for new connections. For now we just fail the create session. |
547 | */ | 559 | */ |
548 | static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan) | 560 | static int nfsd4_get_drc_mem(int slotsize, u32 num) |
549 | { | 561 | { |
550 | int mem, size = fchan->maxresp_cached; | 562 | int avail; |
551 | 563 | ||
552 | if (fchan->maxreqs < 1) | 564 | num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION); |
553 | return nfserr_inval; | ||
554 | 565 | ||
555 | if (size < NFSD_MIN_HDR_SEQ_SZ) | 566 | spin_lock(&nfsd_drc_lock); |
556 | size = NFSD_MIN_HDR_SEQ_SZ; | 567 | avail = min_t(int, NFSD_MAX_MEM_PER_SESSION, |
557 | size -= NFSD_MIN_HDR_SEQ_SZ; | 568 | nfsd_drc_max_mem - nfsd_drc_mem_used); |
558 | if (size > NFSD_SLOT_CACHE_SIZE) | 569 | num = min_t(int, num, avail / slotsize); |
559 | size = NFSD_SLOT_CACHE_SIZE; | 570 | nfsd_drc_mem_used += num * slotsize; |
560 | 571 | spin_unlock(&nfsd_drc_lock); | |
561 | /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */ | 572 | |
562 | mem = fchan->maxreqs * size; | 573 | return num; |
563 | if (mem > NFSD_MAX_MEM_PER_SESSION) { | 574 | } |
564 | fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size; | ||
565 | if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) | ||
566 | fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; | ||
567 | mem = fchan->maxreqs * size; | ||
568 | } | ||
569 | 575 | ||
576 | static void nfsd4_put_drc_mem(int slotsize, int num) | ||
577 | { | ||
570 | spin_lock(&nfsd_drc_lock); | 578 | spin_lock(&nfsd_drc_lock); |
571 | /* bound the total session drc memory ussage */ | 579 | nfsd_drc_mem_used -= slotsize * num; |
572 | if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) { | ||
573 | fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size; | ||
574 | mem = fchan->maxreqs * size; | ||
575 | } | ||
576 | nfsd_drc_mem_used += mem; | ||
577 | spin_unlock(&nfsd_drc_lock); | 580 | spin_unlock(&nfsd_drc_lock); |
581 | } | ||
578 | 582 | ||
579 | if (fchan->maxreqs == 0) | 583 | static struct nfsd4_session *alloc_session(int slotsize, int numslots) |
580 | return nfserr_jukebox; | 584 | { |
585 | struct nfsd4_session *new; | ||
586 | int mem, i; | ||
581 | 587 | ||
582 | fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ; | 588 | BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) |
583 | return 0; | 589 | + sizeof(struct nfsd4_session) > PAGE_SIZE); |
590 | mem = numslots * sizeof(struct nfsd4_slot *); | ||
591 | |||
592 | new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); | ||
593 | if (!new) | ||
594 | return NULL; | ||
595 | /* allocate each struct nfsd4_slot and data cache in one piece */ | ||
596 | for (i = 0; i < numslots; i++) { | ||
597 | mem = sizeof(struct nfsd4_slot) + slotsize; | ||
598 | new->se_slots[i] = kzalloc(mem, GFP_KERNEL); | ||
599 | if (!new->se_slots[i]) | ||
600 | goto out_free; | ||
601 | } | ||
602 | return new; | ||
603 | out_free: | ||
604 | while (i--) | ||
605 | kfree(new->se_slots[i]); | ||
606 | kfree(new); | ||
607 | return NULL; | ||
584 | } | 608 | } |
585 | 609 | ||
586 | /* | 610 | static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize) |
587 | * fchan holds the client values on input, and the server values on output | ||
588 | * sv_max_mesg is the maximum payload plus one page for overhead. | ||
589 | */ | ||
590 | static int init_forechannel_attrs(struct svc_rqst *rqstp, | ||
591 | struct nfsd4_channel_attrs *session_fchan, | ||
592 | struct nfsd4_channel_attrs *fchan) | ||
593 | { | 611 | { |
594 | int status = 0; | 612 | u32 maxrpc = nfsd_serv->sv_max_mesg; |
595 | __u32 maxcount = nfsd_serv->sv_max_mesg; | ||
596 | 613 | ||
597 | /* headerpadsz set to zero in encode routine */ | 614 | new->maxreqs = numslots; |
615 | new->maxresp_cached = min_t(u32, req->maxresp_cached, | ||
616 | slotsize + NFSD_MIN_HDR_SEQ_SZ); | ||
617 | new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc); | ||
618 | new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc); | ||
619 | new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND); | ||
620 | } | ||
598 | 621 | ||
599 | /* Use the client's max request and max response size if possible */ | 622 | static void free_conn(struct nfsd4_conn *c) |
600 | if (fchan->maxreq_sz > maxcount) | 623 | { |
601 | fchan->maxreq_sz = maxcount; | 624 | svc_xprt_put(c->cn_xprt); |
602 | session_fchan->maxreq_sz = fchan->maxreq_sz; | 625 | kfree(c); |
626 | } | ||
603 | 627 | ||
604 | if (fchan->maxresp_sz > maxcount) | 628 | static void nfsd4_conn_lost(struct svc_xpt_user *u) |
605 | fchan->maxresp_sz = maxcount; | 629 | { |
606 | session_fchan->maxresp_sz = fchan->maxresp_sz; | 630 | struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); |
631 | struct nfs4_client *clp = c->cn_session->se_client; | ||
607 | 632 | ||
608 | /* Use the client's maxops if possible */ | 633 | spin_lock(&clp->cl_lock); |
609 | if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND) | 634 | if (!list_empty(&c->cn_persession)) { |
610 | fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND; | 635 | list_del(&c->cn_persession); |
611 | session_fchan->maxops = fchan->maxops; | 636 | free_conn(c); |
637 | } | ||
638 | spin_unlock(&clp->cl_lock); | ||
639 | nfsd4_probe_callback(clp); | ||
640 | } | ||
612 | 641 | ||
613 | /* FIXME: Error means no more DRC pages so the server should | 642 | static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) |
614 | * recover pages from existing sessions. For now fail session | 643 | { |
615 | * creation. | 644 | struct nfsd4_conn *conn; |
616 | */ | ||
617 | status = set_forechannel_drc_size(fchan); | ||
618 | 645 | ||
619 | session_fchan->maxresp_cached = fchan->maxresp_cached; | 646 | conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); |
620 | session_fchan->maxreqs = fchan->maxreqs; | 647 | if (!conn) |
648 | return NULL; | ||
649 | svc_xprt_get(rqstp->rq_xprt); | ||
650 | conn->cn_xprt = rqstp->rq_xprt; | ||
651 | conn->cn_flags = flags; | ||
652 | INIT_LIST_HEAD(&conn->cn_xpt_user.list); | ||
653 | return conn; | ||
654 | } | ||
621 | 655 | ||
622 | dprintk("%s status %d\n", __func__, status); | 656 | static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) |
623 | return status; | 657 | { |
658 | conn->cn_session = ses; | ||
659 | list_add(&conn->cn_persession, &ses->se_conns); | ||
624 | } | 660 | } |
625 | 661 | ||
626 | static void | 662 | static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) |
627 | free_session_slots(struct nfsd4_session *ses) | ||
628 | { | 663 | { |
629 | int i; | 664 | struct nfs4_client *clp = ses->se_client; |
630 | 665 | ||
631 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) | 666 | spin_lock(&clp->cl_lock); |
632 | kfree(ses->se_slots[i]); | 667 | __nfsd4_hash_conn(conn, ses); |
668 | spin_unlock(&clp->cl_lock); | ||
633 | } | 669 | } |
634 | 670 | ||
635 | /* | 671 | static int nfsd4_register_conn(struct nfsd4_conn *conn) |
636 | * We don't actually need to cache the rpc and session headers, so we | ||
637 | * can allocate a little less for each slot: | ||
638 | */ | ||
639 | static inline int slot_bytes(struct nfsd4_channel_attrs *ca) | ||
640 | { | 672 | { |
641 | return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; | 673 | conn->cn_xpt_user.callback = nfsd4_conn_lost; |
674 | return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); | ||
642 | } | 675 | } |
643 | 676 | ||
644 | static int | 677 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir) |
645 | alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, | ||
646 | struct nfsd4_create_session *cses) | ||
647 | { | 678 | { |
648 | struct nfsd4_session *new, tmp; | 679 | struct nfsd4_conn *conn; |
649 | struct nfsd4_slot *sp; | 680 | int ret; |
650 | int idx, slotsize, cachesize, i; | ||
651 | int status; | ||
652 | 681 | ||
653 | memset(&tmp, 0, sizeof(tmp)); | 682 | conn = alloc_conn(rqstp, dir); |
683 | if (!conn) | ||
684 | return nfserr_jukebox; | ||
685 | nfsd4_hash_conn(conn, ses); | ||
686 | ret = nfsd4_register_conn(conn); | ||
687 | if (ret) | ||
688 | /* oops; xprt is already down: */ | ||
689 | nfsd4_conn_lost(&conn->cn_xpt_user); | ||
690 | return nfs_ok; | ||
691 | } | ||
654 | 692 | ||
655 | /* FIXME: For now, we just accept the client back channel attributes. */ | 693 | static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses) |
656 | tmp.se_bchannel = cses->back_channel; | 694 | { |
657 | status = init_forechannel_attrs(rqstp, &tmp.se_fchannel, | 695 | u32 dir = NFS4_CDFC4_FORE; |
658 | &cses->fore_channel); | ||
659 | if (status) | ||
660 | goto out; | ||
661 | 696 | ||
662 | BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot) | 697 | if (ses->se_flags & SESSION4_BACK_CHAN) |
663 | + sizeof(struct nfsd4_session) > PAGE_SIZE); | 698 | dir |= NFS4_CDFC4_BACK; |
664 | 699 | ||
665 | status = nfserr_jukebox; | 700 | return nfsd4_new_conn(rqstp, ses, dir); |
666 | /* allocate struct nfsd4_session and slot table pointers in one piece */ | 701 | } |
667 | slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *); | ||
668 | new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); | ||
669 | if (!new) | ||
670 | goto out; | ||
671 | 702 | ||
672 | memcpy(new, &tmp, sizeof(*new)); | 703 | /* must be called under client_lock */ |
704 | static void nfsd4_del_conns(struct nfsd4_session *s) | ||
705 | { | ||
706 | struct nfs4_client *clp = s->se_client; | ||
707 | struct nfsd4_conn *c; | ||
673 | 708 | ||
674 | /* allocate each struct nfsd4_slot and data cache in one piece */ | 709 | spin_lock(&clp->cl_lock); |
675 | cachesize = slot_bytes(&new->se_fchannel); | 710 | while (!list_empty(&s->se_conns)) { |
676 | for (i = 0; i < new->se_fchannel.maxreqs; i++) { | 711 | c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); |
677 | sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); | 712 | list_del_init(&c->cn_persession); |
678 | if (!sp) | 713 | spin_unlock(&clp->cl_lock); |
679 | goto out_free; | 714 | |
680 | new->se_slots[i] = sp; | 715 | unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); |
716 | free_conn(c); | ||
717 | |||
718 | spin_lock(&clp->cl_lock); | ||
719 | } | ||
720 | spin_unlock(&clp->cl_lock); | ||
721 | } | ||
722 | |||
723 | void free_session(struct kref *kref) | ||
724 | { | ||
725 | struct nfsd4_session *ses; | ||
726 | int mem; | ||
727 | |||
728 | ses = container_of(kref, struct nfsd4_session, se_ref); | ||
729 | nfsd4_del_conns(ses); | ||
730 | spin_lock(&nfsd_drc_lock); | ||
731 | mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); | ||
732 | nfsd_drc_mem_used -= mem; | ||
733 | spin_unlock(&nfsd_drc_lock); | ||
734 | free_session_slots(ses); | ||
735 | kfree(ses); | ||
736 | } | ||
737 | |||
738 | static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) | ||
739 | { | ||
740 | struct nfsd4_session *new; | ||
741 | struct nfsd4_channel_attrs *fchan = &cses->fore_channel; | ||
742 | int numslots, slotsize; | ||
743 | int status; | ||
744 | int idx; | ||
745 | |||
746 | /* | ||
747 | * Note decreasing slot size below client's request may | ||
748 | * make it difficult for client to function correctly, whereas | ||
749 | * decreasing the number of slots will (just?) affect | ||
750 | * performance. When short on memory we therefore prefer to | ||
751 | * decrease number of slots instead of their size. | ||
752 | */ | ||
753 | slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); | ||
754 | numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); | ||
755 | if (numslots < 1) | ||
756 | return NULL; | ||
757 | |||
758 | new = alloc_session(slotsize, numslots); | ||
759 | if (!new) { | ||
760 | nfsd4_put_drc_mem(slotsize, fchan->maxreqs); | ||
761 | return NULL; | ||
681 | } | 762 | } |
763 | init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize); | ||
682 | 764 | ||
683 | new->se_client = clp; | 765 | new->se_client = clp; |
684 | gen_sessionid(new); | 766 | gen_sessionid(new); |
685 | idx = hash_sessionid(&new->se_sessionid); | ||
686 | memcpy(clp->cl_sessionid.data, new->se_sessionid.data, | ||
687 | NFS4_MAX_SESSIONID_LEN); | ||
688 | 767 | ||
768 | INIT_LIST_HEAD(&new->se_conns); | ||
769 | |||
770 | new->se_cb_seq_nr = 1; | ||
689 | new->se_flags = cses->flags; | 771 | new->se_flags = cses->flags; |
772 | new->se_cb_prog = cses->callback_prog; | ||
690 | kref_init(&new->se_ref); | 773 | kref_init(&new->se_ref); |
774 | idx = hash_sessionid(&new->se_sessionid); | ||
691 | spin_lock(&client_lock); | 775 | spin_lock(&client_lock); |
692 | list_add(&new->se_hash, &sessionid_hashtbl[idx]); | 776 | list_add(&new->se_hash, &sessionid_hashtbl[idx]); |
777 | spin_lock(&clp->cl_lock); | ||
693 | list_add(&new->se_perclnt, &clp->cl_sessions); | 778 | list_add(&new->se_perclnt, &clp->cl_sessions); |
779 | spin_unlock(&clp->cl_lock); | ||
694 | spin_unlock(&client_lock); | 780 | spin_unlock(&client_lock); |
695 | 781 | ||
696 | status = nfs_ok; | 782 | status = nfsd4_new_conn_from_crses(rqstp, new); |
697 | out: | 783 | /* whoops: benny points out, status is ignored! (err, or bogus) */ |
698 | return status; | 784 | if (status) { |
699 | out_free: | 785 | free_session(&new->se_ref); |
700 | free_session_slots(new); | 786 | return NULL; |
701 | kfree(new); | 787 | } |
702 | goto out; | 788 | if (cses->flags & SESSION4_BACK_CHAN) { |
789 | struct sockaddr *sa = svc_addr(rqstp); | ||
790 | /* | ||
791 | * This is a little silly; with sessions there's no real | ||
792 | * use for the callback address. Use the peer address | ||
793 | * as a reasonable default for now, but consider fixing | ||
794 | * the rpc client not to require an address in the | ||
795 | * future: | ||
796 | */ | ||
797 | rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); | ||
798 | clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); | ||
799 | } | ||
800 | nfsd4_probe_callback(clp); | ||
801 | return new; | ||
703 | } | 802 | } |
704 | 803 | ||
705 | /* caller must hold client_lock */ | 804 | /* caller must hold client_lock */ |
@@ -728,22 +827,9 @@ static void | |||
728 | unhash_session(struct nfsd4_session *ses) | 827 | unhash_session(struct nfsd4_session *ses) |
729 | { | 828 | { |
730 | list_del(&ses->se_hash); | 829 | list_del(&ses->se_hash); |
830 | spin_lock(&ses->se_client->cl_lock); | ||
731 | list_del(&ses->se_perclnt); | 831 | list_del(&ses->se_perclnt); |
732 | } | 832 | spin_unlock(&ses->se_client->cl_lock); |
733 | |||
734 | void | ||
735 | free_session(struct kref *kref) | ||
736 | { | ||
737 | struct nfsd4_session *ses; | ||
738 | int mem; | ||
739 | |||
740 | ses = container_of(kref, struct nfsd4_session, se_ref); | ||
741 | spin_lock(&nfsd_drc_lock); | ||
742 | mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); | ||
743 | nfsd_drc_mem_used -= mem; | ||
744 | spin_unlock(&nfsd_drc_lock); | ||
745 | free_session_slots(ses); | ||
746 | kfree(ses); | ||
747 | } | 833 | } |
748 | 834 | ||
749 | /* must be called under the client_lock */ | 835 | /* must be called under the client_lock */ |
@@ -812,6 +898,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
812 | static inline void | 898 | static inline void |
813 | free_client(struct nfs4_client *clp) | 899 | free_client(struct nfs4_client *clp) |
814 | { | 900 | { |
901 | while (!list_empty(&clp->cl_sessions)) { | ||
902 | struct nfsd4_session *ses; | ||
903 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | ||
904 | se_perclnt); | ||
905 | list_del(&ses->se_perclnt); | ||
906 | nfsd4_put_session(ses); | ||
907 | } | ||
815 | if (clp->cl_cred.cr_group_info) | 908 | if (clp->cl_cred.cr_group_info) |
816 | put_group_info(clp->cl_cred.cr_group_info); | 909 | put_group_info(clp->cl_cred.cr_group_info); |
817 | kfree(clp->cl_principal); | 910 | kfree(clp->cl_principal); |
@@ -838,15 +931,14 @@ release_session_client(struct nfsd4_session *session) | |||
838 | static inline void | 931 | static inline void |
839 | unhash_client_locked(struct nfs4_client *clp) | 932 | unhash_client_locked(struct nfs4_client *clp) |
840 | { | 933 | { |
934 | struct nfsd4_session *ses; | ||
935 | |||
841 | mark_client_expired(clp); | 936 | mark_client_expired(clp); |
842 | list_del(&clp->cl_lru); | 937 | list_del(&clp->cl_lru); |
843 | while (!list_empty(&clp->cl_sessions)) { | 938 | spin_lock(&clp->cl_lock); |
844 | struct nfsd4_session *ses; | 939 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) |
845 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 940 | list_del_init(&ses->se_hash); |
846 | se_perclnt); | 941 | spin_unlock(&clp->cl_lock); |
847 | unhash_session(ses); | ||
848 | nfsd4_put_session(ses); | ||
849 | } | ||
850 | } | 942 | } |
851 | 943 | ||
852 | static void | 944 | static void |
@@ -860,8 +952,6 @@ expire_client(struct nfs4_client *clp) | |||
860 | spin_lock(&recall_lock); | 952 | spin_lock(&recall_lock); |
861 | while (!list_empty(&clp->cl_delegations)) { | 953 | while (!list_empty(&clp->cl_delegations)) { |
862 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); | 954 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); |
863 | dprintk("NFSD: expire client. dp %p, fp %p\n", dp, | ||
864 | dp->dl_flock); | ||
865 | list_del_init(&dp->dl_perclnt); | 955 | list_del_init(&dp->dl_perclnt); |
866 | list_move(&dp->dl_recall_lru, &reaplist); | 956 | list_move(&dp->dl_recall_lru, &reaplist); |
867 | } | 957 | } |
@@ -875,7 +965,7 @@ expire_client(struct nfs4_client *clp) | |||
875 | sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient); | 965 | sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient); |
876 | release_openowner(sop); | 966 | release_openowner(sop); |
877 | } | 967 | } |
878 | nfsd4_set_callback_client(clp, NULL); | 968 | nfsd4_shutdown_callback(clp); |
879 | if (clp->cl_cb_conn.cb_xprt) | 969 | if (clp->cl_cb_conn.cb_xprt) |
880 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); | 970 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); |
881 | list_del(&clp->cl_idhash); | 971 | list_del(&clp->cl_idhash); |
@@ -960,6 +1050,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, | |||
960 | if (clp == NULL) | 1050 | if (clp == NULL) |
961 | return NULL; | 1051 | return NULL; |
962 | 1052 | ||
1053 | INIT_LIST_HEAD(&clp->cl_sessions); | ||
1054 | |||
963 | princ = svc_gss_principal(rqstp); | 1055 | princ = svc_gss_principal(rqstp); |
964 | if (princ) { | 1056 | if (princ) { |
965 | clp->cl_principal = kstrdup(princ, GFP_KERNEL); | 1057 | clp->cl_principal = kstrdup(princ, GFP_KERNEL); |
@@ -971,13 +1063,15 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, | |||
971 | 1063 | ||
972 | memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); | 1064 | memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); |
973 | atomic_set(&clp->cl_refcount, 0); | 1065 | atomic_set(&clp->cl_refcount, 0); |
974 | atomic_set(&clp->cl_cb_set, 0); | 1066 | clp->cl_cb_state = NFSD4_CB_UNKNOWN; |
975 | INIT_LIST_HEAD(&clp->cl_idhash); | 1067 | INIT_LIST_HEAD(&clp->cl_idhash); |
976 | INIT_LIST_HEAD(&clp->cl_strhash); | 1068 | INIT_LIST_HEAD(&clp->cl_strhash); |
977 | INIT_LIST_HEAD(&clp->cl_openowners); | 1069 | INIT_LIST_HEAD(&clp->cl_openowners); |
978 | INIT_LIST_HEAD(&clp->cl_delegations); | 1070 | INIT_LIST_HEAD(&clp->cl_delegations); |
979 | INIT_LIST_HEAD(&clp->cl_sessions); | ||
980 | INIT_LIST_HEAD(&clp->cl_lru); | 1071 | INIT_LIST_HEAD(&clp->cl_lru); |
1072 | INIT_LIST_HEAD(&clp->cl_callbacks); | ||
1073 | spin_lock_init(&clp->cl_lock); | ||
1074 | INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); | ||
981 | clp->cl_time = get_seconds(); | 1075 | clp->cl_time = get_seconds(); |
982 | clear_bit(0, &clp->cl_cb_slot_busy); | 1076 | clear_bit(0, &clp->cl_cb_slot_busy); |
983 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); | 1077 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); |
@@ -986,7 +1080,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, | |||
986 | clp->cl_flavor = rqstp->rq_flavor; | 1080 | clp->cl_flavor = rqstp->rq_flavor; |
987 | copy_cred(&clp->cl_cred, &rqstp->rq_cred); | 1081 | copy_cred(&clp->cl_cred, &rqstp->rq_cred); |
988 | gen_confirm(clp); | 1082 | gen_confirm(clp); |
989 | 1083 | clp->cl_cb_session = NULL; | |
990 | return clp; | 1084 | return clp; |
991 | } | 1085 | } |
992 | 1086 | ||
@@ -1051,54 +1145,55 @@ find_unconfirmed_client(clientid_t *clid) | |||
1051 | return NULL; | 1145 | return NULL; |
1052 | } | 1146 | } |
1053 | 1147 | ||
1054 | /* | 1148 | static bool clp_used_exchangeid(struct nfs4_client *clp) |
1055 | * Return 1 iff clp's clientid establishment method matches the use_exchange_id | ||
1056 | * parameter. Matching is based on the fact the at least one of the | ||
1057 | * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1 | ||
1058 | * | ||
1059 | * FIXME: we need to unify the clientid namespaces for nfsv4.x | ||
1060 | * and correctly deal with client upgrade/downgrade in EXCHANGE_ID | ||
1061 | * and SET_CLIENTID{,_CONFIRM} | ||
1062 | */ | ||
1063 | static inline int | ||
1064 | match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id) | ||
1065 | { | 1149 | { |
1066 | bool has_exchange_flags = (clp->cl_exchange_flags != 0); | 1150 | return clp->cl_exchange_flags != 0; |
1067 | return use_exchange_id == has_exchange_flags; | 1151 | } |
1068 | } | ||
1069 | 1152 | ||
1070 | static struct nfs4_client * | 1153 | static struct nfs4_client * |
1071 | find_confirmed_client_by_str(const char *dname, unsigned int hashval, | 1154 | find_confirmed_client_by_str(const char *dname, unsigned int hashval) |
1072 | bool use_exchange_id) | ||
1073 | { | 1155 | { |
1074 | struct nfs4_client *clp; | 1156 | struct nfs4_client *clp; |
1075 | 1157 | ||
1076 | list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { | 1158 | list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { |
1077 | if (same_name(clp->cl_recdir, dname) && | 1159 | if (same_name(clp->cl_recdir, dname)) |
1078 | match_clientid_establishment(clp, use_exchange_id)) | ||
1079 | return clp; | 1160 | return clp; |
1080 | } | 1161 | } |
1081 | return NULL; | 1162 | return NULL; |
1082 | } | 1163 | } |
1083 | 1164 | ||
1084 | static struct nfs4_client * | 1165 | static struct nfs4_client * |
1085 | find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, | 1166 | find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) |
1086 | bool use_exchange_id) | ||
1087 | { | 1167 | { |
1088 | struct nfs4_client *clp; | 1168 | struct nfs4_client *clp; |
1089 | 1169 | ||
1090 | list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { | 1170 | list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { |
1091 | if (same_name(clp->cl_recdir, dname) && | 1171 | if (same_name(clp->cl_recdir, dname)) |
1092 | match_clientid_establishment(clp, use_exchange_id)) | ||
1093 | return clp; | 1172 | return clp; |
1094 | } | 1173 | } |
1095 | return NULL; | 1174 | return NULL; |
1096 | } | 1175 | } |
1097 | 1176 | ||
1177 | static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr) | ||
1178 | { | ||
1179 | switch (family) { | ||
1180 | case AF_INET: | ||
1181 | ((struct sockaddr_in *)sa)->sin_family = AF_INET; | ||
1182 | ((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr; | ||
1183 | return; | ||
1184 | case AF_INET6: | ||
1185 | ((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6; | ||
1186 | ((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6; | ||
1187 | return; | ||
1188 | } | ||
1189 | } | ||
1190 | |||
1098 | static void | 1191 | static void |
1099 | gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) | 1192 | gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) |
1100 | { | 1193 | { |
1101 | struct nfs4_cb_conn *cb = &clp->cl_cb_conn; | 1194 | struct nfs4_cb_conn *conn = &clp->cl_cb_conn; |
1195 | struct sockaddr *sa = svc_addr(rqstp); | ||
1196 | u32 scopeid = rpc_get_scope_id(sa); | ||
1102 | unsigned short expected_family; | 1197 | unsigned short expected_family; |
1103 | 1198 | ||
1104 | /* Currently, we only support tcp and tcp6 for the callback channel */ | 1199 | /* Currently, we only support tcp and tcp6 for the callback channel */ |
@@ -1111,24 +1206,24 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) | |||
1111 | else | 1206 | else |
1112 | goto out_err; | 1207 | goto out_err; |
1113 | 1208 | ||
1114 | cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, | 1209 | conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, |
1115 | se->se_callback_addr_len, | 1210 | se->se_callback_addr_len, |
1116 | (struct sockaddr *) &cb->cb_addr, | 1211 | (struct sockaddr *)&conn->cb_addr, |
1117 | sizeof(cb->cb_addr)); | 1212 | sizeof(conn->cb_addr)); |
1118 | 1213 | ||
1119 | if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family) | 1214 | if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) |
1120 | goto out_err; | 1215 | goto out_err; |
1121 | 1216 | ||
1122 | if (cb->cb_addr.ss_family == AF_INET6) | 1217 | if (conn->cb_addr.ss_family == AF_INET6) |
1123 | ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid; | 1218 | ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; |
1124 | 1219 | ||
1125 | cb->cb_minorversion = 0; | 1220 | conn->cb_prog = se->se_callback_prog; |
1126 | cb->cb_prog = se->se_callback_prog; | 1221 | conn->cb_ident = se->se_callback_ident; |
1127 | cb->cb_ident = se->se_callback_ident; | 1222 | rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr); |
1128 | return; | 1223 | return; |
1129 | out_err: | 1224 | out_err: |
1130 | cb->cb_addr.ss_family = AF_UNSPEC; | 1225 | conn->cb_addr.ss_family = AF_UNSPEC; |
1131 | cb->cb_addrlen = 0; | 1226 | conn->cb_addrlen = 0; |
1132 | dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " | 1227 | dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " |
1133 | "will not receive delegations\n", | 1228 | "will not receive delegations\n", |
1134 | clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); | 1229 | clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); |
@@ -1264,7 +1359,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1264 | case SP4_NONE: | 1359 | case SP4_NONE: |
1265 | break; | 1360 | break; |
1266 | case SP4_SSV: | 1361 | case SP4_SSV: |
1267 | return nfserr_encr_alg_unsupp; | 1362 | return nfserr_serverfault; |
1268 | default: | 1363 | default: |
1269 | BUG(); /* checked by xdr code */ | 1364 | BUG(); /* checked by xdr code */ |
1270 | case SP4_MACH_CRED: | 1365 | case SP4_MACH_CRED: |
@@ -1281,8 +1376,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1281 | nfs4_lock_state(); | 1376 | nfs4_lock_state(); |
1282 | status = nfs_ok; | 1377 | status = nfs_ok; |
1283 | 1378 | ||
1284 | conf = find_confirmed_client_by_str(dname, strhashval, true); | 1379 | conf = find_confirmed_client_by_str(dname, strhashval); |
1285 | if (conf) { | 1380 | if (conf) { |
1381 | if (!clp_used_exchangeid(conf)) { | ||
1382 | status = nfserr_clid_inuse; /* XXX: ? */ | ||
1383 | goto out; | ||
1384 | } | ||
1286 | if (!same_verf(&verf, &conf->cl_verifier)) { | 1385 | if (!same_verf(&verf, &conf->cl_verifier)) { |
1287 | /* 18.35.4 case 8 */ | 1386 | /* 18.35.4 case 8 */ |
1288 | if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { | 1387 | if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { |
@@ -1323,7 +1422,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1323 | goto out; | 1422 | goto out; |
1324 | } | 1423 | } |
1325 | 1424 | ||
1326 | unconf = find_unconfirmed_client_by_str(dname, strhashval, true); | 1425 | unconf = find_unconfirmed_client_by_str(dname, strhashval); |
1327 | if (unconf) { | 1426 | if (unconf) { |
1328 | /* | 1427 | /* |
1329 | * Possible retry or client restart. Per 18.35.4 case 4, | 1428 | * Possible retry or client restart. Per 18.35.4 case 4, |
@@ -1415,9 +1514,14 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1415 | { | 1514 | { |
1416 | struct sockaddr *sa = svc_addr(rqstp); | 1515 | struct sockaddr *sa = svc_addr(rqstp); |
1417 | struct nfs4_client *conf, *unconf; | 1516 | struct nfs4_client *conf, *unconf; |
1517 | struct nfsd4_session *new; | ||
1418 | struct nfsd4_clid_slot *cs_slot = NULL; | 1518 | struct nfsd4_clid_slot *cs_slot = NULL; |
1519 | bool confirm_me = false; | ||
1419 | int status = 0; | 1520 | int status = 0; |
1420 | 1521 | ||
1522 | if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) | ||
1523 | return nfserr_inval; | ||
1524 | |||
1421 | nfs4_lock_state(); | 1525 | nfs4_lock_state(); |
1422 | unconf = find_unconfirmed_client(&cr_ses->clientid); | 1526 | unconf = find_unconfirmed_client(&cr_ses->clientid); |
1423 | conf = find_confirmed_client(&cr_ses->clientid); | 1527 | conf = find_confirmed_client(&cr_ses->clientid); |
@@ -1438,7 +1542,6 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1438 | cs_slot->sl_seqid, cr_ses->seqid); | 1542 | cs_slot->sl_seqid, cr_ses->seqid); |
1439 | goto out; | 1543 | goto out; |
1440 | } | 1544 | } |
1441 | cs_slot->sl_seqid++; | ||
1442 | } else if (unconf) { | 1545 | } else if (unconf) { |
1443 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || | 1546 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || |
1444 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { | 1547 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { |
@@ -1451,25 +1554,10 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1451 | if (status) { | 1554 | if (status) { |
1452 | /* an unconfirmed replay returns misordered */ | 1555 | /* an unconfirmed replay returns misordered */ |
1453 | status = nfserr_seq_misordered; | 1556 | status = nfserr_seq_misordered; |
1454 | goto out_cache; | 1557 | goto out; |
1455 | } | 1558 | } |
1456 | 1559 | ||
1457 | cs_slot->sl_seqid++; /* from 0 to 1 */ | 1560 | confirm_me = true; |
1458 | move_to_confirmed(unconf); | ||
1459 | |||
1460 | if (cr_ses->flags & SESSION4_BACK_CHAN) { | ||
1461 | unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt; | ||
1462 | svc_xprt_get(rqstp->rq_xprt); | ||
1463 | rpc_copy_addr( | ||
1464 | (struct sockaddr *)&unconf->cl_cb_conn.cb_addr, | ||
1465 | sa); | ||
1466 | unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa); | ||
1467 | unconf->cl_cb_conn.cb_minorversion = | ||
1468 | cstate->minorversion; | ||
1469 | unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog; | ||
1470 | unconf->cl_cb_seq_nr = 1; | ||
1471 | nfsd4_probe_callback(unconf, &unconf->cl_cb_conn); | ||
1472 | } | ||
1473 | conf = unconf; | 1561 | conf = unconf; |
1474 | } else { | 1562 | } else { |
1475 | status = nfserr_stale_clientid; | 1563 | status = nfserr_stale_clientid; |
@@ -1477,22 +1565,32 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1477 | } | 1565 | } |
1478 | 1566 | ||
1479 | /* | 1567 | /* |
1568 | * XXX: we should probably set this at creation time, and check | ||
1569 | * for consistent minorversion use throughout: | ||
1570 | */ | ||
1571 | conf->cl_minorversion = 1; | ||
1572 | /* | ||
1480 | * We do not support RDMA or persistent sessions | 1573 | * We do not support RDMA or persistent sessions |
1481 | */ | 1574 | */ |
1482 | cr_ses->flags &= ~SESSION4_PERSIST; | 1575 | cr_ses->flags &= ~SESSION4_PERSIST; |
1483 | cr_ses->flags &= ~SESSION4_RDMA; | 1576 | cr_ses->flags &= ~SESSION4_RDMA; |
1484 | 1577 | ||
1485 | status = alloc_init_session(rqstp, conf, cr_ses); | 1578 | status = nfserr_jukebox; |
1486 | if (status) | 1579 | new = alloc_init_session(rqstp, conf, cr_ses); |
1580 | if (!new) | ||
1487 | goto out; | 1581 | goto out; |
1488 | 1582 | status = nfs_ok; | |
1489 | memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data, | 1583 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, |
1490 | NFS4_MAX_SESSIONID_LEN); | 1584 | NFS4_MAX_SESSIONID_LEN); |
1585 | memcpy(&cr_ses->fore_channel, &new->se_fchannel, | ||
1586 | sizeof(struct nfsd4_channel_attrs)); | ||
1587 | cs_slot->sl_seqid++; | ||
1491 | cr_ses->seqid = cs_slot->sl_seqid; | 1588 | cr_ses->seqid = cs_slot->sl_seqid; |
1492 | 1589 | ||
1493 | out_cache: | ||
1494 | /* cache solo and embedded create sessions under the state lock */ | 1590 | /* cache solo and embedded create sessions under the state lock */ |
1495 | nfsd4_cache_create_session(cr_ses, cs_slot, status); | 1591 | nfsd4_cache_create_session(cr_ses, cs_slot, status); |
1592 | if (confirm_me) | ||
1593 | move_to_confirmed(conf); | ||
1496 | out: | 1594 | out: |
1497 | nfs4_unlock_state(); | 1595 | nfs4_unlock_state(); |
1498 | dprintk("%s returns %d\n", __func__, ntohl(status)); | 1596 | dprintk("%s returns %d\n", __func__, ntohl(status)); |
@@ -1507,6 +1605,46 @@ static bool nfsd4_last_compound_op(struct svc_rqst *rqstp) | |||
1507 | return argp->opcnt == resp->opcnt; | 1605 | return argp->opcnt == resp->opcnt; |
1508 | } | 1606 | } |
1509 | 1607 | ||
1608 | static __be32 nfsd4_map_bcts_dir(u32 *dir) | ||
1609 | { | ||
1610 | switch (*dir) { | ||
1611 | case NFS4_CDFC4_FORE: | ||
1612 | case NFS4_CDFC4_BACK: | ||
1613 | return nfs_ok; | ||
1614 | case NFS4_CDFC4_FORE_OR_BOTH: | ||
1615 | case NFS4_CDFC4_BACK_OR_BOTH: | ||
1616 | *dir = NFS4_CDFC4_BOTH; | ||
1617 | return nfs_ok; | ||
1618 | }; | ||
1619 | return nfserr_inval; | ||
1620 | } | ||
1621 | |||
1622 | __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, | ||
1623 | struct nfsd4_compound_state *cstate, | ||
1624 | struct nfsd4_bind_conn_to_session *bcts) | ||
1625 | { | ||
1626 | __be32 status; | ||
1627 | |||
1628 | if (!nfsd4_last_compound_op(rqstp)) | ||
1629 | return nfserr_not_only_op; | ||
1630 | spin_lock(&client_lock); | ||
1631 | cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid); | ||
1632 | /* Sorta weird: we only need the refcnt'ing because new_conn acquires | ||
1633 | * client_lock iself: */ | ||
1634 | if (cstate->session) { | ||
1635 | nfsd4_get_session(cstate->session); | ||
1636 | atomic_inc(&cstate->session->se_client->cl_refcount); | ||
1637 | } | ||
1638 | spin_unlock(&client_lock); | ||
1639 | if (!cstate->session) | ||
1640 | return nfserr_badsession; | ||
1641 | |||
1642 | status = nfsd4_map_bcts_dir(&bcts->dir); | ||
1643 | if (!status) | ||
1644 | nfsd4_new_conn(rqstp, cstate->session, bcts->dir); | ||
1645 | return status; | ||
1646 | } | ||
1647 | |||
1510 | static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) | 1648 | static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) |
1511 | { | 1649 | { |
1512 | if (!session) | 1650 | if (!session) |
@@ -1545,9 +1683,11 @@ nfsd4_destroy_session(struct svc_rqst *r, | |||
1545 | spin_unlock(&client_lock); | 1683 | spin_unlock(&client_lock); |
1546 | 1684 | ||
1547 | nfs4_lock_state(); | 1685 | nfs4_lock_state(); |
1548 | /* wait for callbacks */ | 1686 | nfsd4_probe_callback_sync(ses->se_client); |
1549 | nfsd4_set_callback_client(ses->se_client, NULL); | ||
1550 | nfs4_unlock_state(); | 1687 | nfs4_unlock_state(); |
1688 | |||
1689 | nfsd4_del_conns(ses); | ||
1690 | |||
1551 | nfsd4_put_session(ses); | 1691 | nfsd4_put_session(ses); |
1552 | status = nfs_ok; | 1692 | status = nfs_ok; |
1553 | out: | 1693 | out: |
@@ -1555,6 +1695,47 @@ out: | |||
1555 | return status; | 1695 | return status; |
1556 | } | 1696 | } |
1557 | 1697 | ||
1698 | static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) | ||
1699 | { | ||
1700 | struct nfsd4_conn *c; | ||
1701 | |||
1702 | list_for_each_entry(c, &s->se_conns, cn_persession) { | ||
1703 | if (c->cn_xprt == xpt) { | ||
1704 | return c; | ||
1705 | } | ||
1706 | } | ||
1707 | return NULL; | ||
1708 | } | ||
1709 | |||
1710 | static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) | ||
1711 | { | ||
1712 | struct nfs4_client *clp = ses->se_client; | ||
1713 | struct nfsd4_conn *c; | ||
1714 | int ret; | ||
1715 | |||
1716 | spin_lock(&clp->cl_lock); | ||
1717 | c = __nfsd4_find_conn(new->cn_xprt, ses); | ||
1718 | if (c) { | ||
1719 | spin_unlock(&clp->cl_lock); | ||
1720 | free_conn(new); | ||
1721 | return; | ||
1722 | } | ||
1723 | __nfsd4_hash_conn(new, ses); | ||
1724 | spin_unlock(&clp->cl_lock); | ||
1725 | ret = nfsd4_register_conn(new); | ||
1726 | if (ret) | ||
1727 | /* oops; xprt is already down: */ | ||
1728 | nfsd4_conn_lost(&new->cn_xpt_user); | ||
1729 | return; | ||
1730 | } | ||
1731 | |||
1732 | static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) | ||
1733 | { | ||
1734 | struct nfsd4_compoundargs *args = rqstp->rq_argp; | ||
1735 | |||
1736 | return args->opcnt > session->se_fchannel.maxops; | ||
1737 | } | ||
1738 | |||
1558 | __be32 | 1739 | __be32 |
1559 | nfsd4_sequence(struct svc_rqst *rqstp, | 1740 | nfsd4_sequence(struct svc_rqst *rqstp, |
1560 | struct nfsd4_compound_state *cstate, | 1741 | struct nfsd4_compound_state *cstate, |
@@ -1563,17 +1744,30 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1563 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | 1744 | struct nfsd4_compoundres *resp = rqstp->rq_resp; |
1564 | struct nfsd4_session *session; | 1745 | struct nfsd4_session *session; |
1565 | struct nfsd4_slot *slot; | 1746 | struct nfsd4_slot *slot; |
1747 | struct nfsd4_conn *conn; | ||
1566 | int status; | 1748 | int status; |
1567 | 1749 | ||
1568 | if (resp->opcnt != 1) | 1750 | if (resp->opcnt != 1) |
1569 | return nfserr_sequence_pos; | 1751 | return nfserr_sequence_pos; |
1570 | 1752 | ||
1753 | /* | ||
1754 | * Will be either used or freed by nfsd4_sequence_check_conn | ||
1755 | * below. | ||
1756 | */ | ||
1757 | conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); | ||
1758 | if (!conn) | ||
1759 | return nfserr_jukebox; | ||
1760 | |||
1571 | spin_lock(&client_lock); | 1761 | spin_lock(&client_lock); |
1572 | status = nfserr_badsession; | 1762 | status = nfserr_badsession; |
1573 | session = find_in_sessionid_hashtbl(&seq->sessionid); | 1763 | session = find_in_sessionid_hashtbl(&seq->sessionid); |
1574 | if (!session) | 1764 | if (!session) |
1575 | goto out; | 1765 | goto out; |
1576 | 1766 | ||
1767 | status = nfserr_too_many_ops; | ||
1768 | if (nfsd4_session_too_many_ops(rqstp, session)) | ||
1769 | goto out; | ||
1770 | |||
1577 | status = nfserr_badslot; | 1771 | status = nfserr_badslot; |
1578 | if (seq->slotid >= session->se_fchannel.maxreqs) | 1772 | if (seq->slotid >= session->se_fchannel.maxreqs) |
1579 | goto out; | 1773 | goto out; |
@@ -1599,6 +1793,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1599 | if (status) | 1793 | if (status) |
1600 | goto out; | 1794 | goto out; |
1601 | 1795 | ||
1796 | nfsd4_sequence_check_conn(conn, session); | ||
1797 | conn = NULL; | ||
1798 | |||
1602 | /* Success! bump slot seqid */ | 1799 | /* Success! bump slot seqid */ |
1603 | slot->sl_inuse = true; | 1800 | slot->sl_inuse = true; |
1604 | slot->sl_seqid = seq->seqid; | 1801 | slot->sl_seqid = seq->seqid; |
@@ -1610,9 +1807,14 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
1610 | out: | 1807 | out: |
1611 | /* Hold a session reference until done processing the compound. */ | 1808 | /* Hold a session reference until done processing the compound. */ |
1612 | if (cstate->session) { | 1809 | if (cstate->session) { |
1810 | struct nfs4_client *clp = session->se_client; | ||
1811 | |||
1613 | nfsd4_get_session(cstate->session); | 1812 | nfsd4_get_session(cstate->session); |
1614 | atomic_inc(&session->se_client->cl_refcount); | 1813 | atomic_inc(&clp->cl_refcount); |
1814 | if (clp->cl_cb_state == NFSD4_CB_DOWN) | ||
1815 | seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN; | ||
1615 | } | 1816 | } |
1817 | kfree(conn); | ||
1616 | spin_unlock(&client_lock); | 1818 | spin_unlock(&client_lock); |
1617 | dprintk("%s: return %d\n", __func__, ntohl(status)); | 1819 | dprintk("%s: return %d\n", __func__, ntohl(status)); |
1618 | return status; | 1820 | return status; |
@@ -1621,6 +1823,8 @@ out: | |||
1621 | __be32 | 1823 | __be32 |
1622 | nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) | 1824 | nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) |
1623 | { | 1825 | { |
1826 | int status = 0; | ||
1827 | |||
1624 | if (rc->rca_one_fs) { | 1828 | if (rc->rca_one_fs) { |
1625 | if (!cstate->current_fh.fh_dentry) | 1829 | if (!cstate->current_fh.fh_dentry) |
1626 | return nfserr_nofilehandle; | 1830 | return nfserr_nofilehandle; |
@@ -1630,9 +1834,14 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
1630 | */ | 1834 | */ |
1631 | return nfs_ok; | 1835 | return nfs_ok; |
1632 | } | 1836 | } |
1837 | |||
1633 | nfs4_lock_state(); | 1838 | nfs4_lock_state(); |
1634 | if (is_client_expired(cstate->session->se_client)) { | 1839 | status = nfserr_complete_already; |
1635 | nfs4_unlock_state(); | 1840 | if (cstate->session->se_client->cl_firststate) |
1841 | goto out; | ||
1842 | |||
1843 | status = nfserr_stale_clientid; | ||
1844 | if (is_client_expired(cstate->session->se_client)) | ||
1636 | /* | 1845 | /* |
1637 | * The following error isn't really legal. | 1846 | * The following error isn't really legal. |
1638 | * But we only get here if the client just explicitly | 1847 | * But we only get here if the client just explicitly |
@@ -1640,18 +1849,19 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
1640 | * error it gets back on an operation for the dead | 1849 | * error it gets back on an operation for the dead |
1641 | * client. | 1850 | * client. |
1642 | */ | 1851 | */ |
1643 | return nfserr_stale_clientid; | 1852 | goto out; |
1644 | } | 1853 | |
1854 | status = nfs_ok; | ||
1645 | nfsd4_create_clid_dir(cstate->session->se_client); | 1855 | nfsd4_create_clid_dir(cstate->session->se_client); |
1856 | out: | ||
1646 | nfs4_unlock_state(); | 1857 | nfs4_unlock_state(); |
1647 | return nfs_ok; | 1858 | return status; |
1648 | } | 1859 | } |
1649 | 1860 | ||
1650 | __be32 | 1861 | __be32 |
1651 | nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 1862 | nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
1652 | struct nfsd4_setclientid *setclid) | 1863 | struct nfsd4_setclientid *setclid) |
1653 | { | 1864 | { |
1654 | struct sockaddr *sa = svc_addr(rqstp); | ||
1655 | struct xdr_netobj clname = { | 1865 | struct xdr_netobj clname = { |
1656 | .len = setclid->se_namelen, | 1866 | .len = setclid->se_namelen, |
1657 | .data = setclid->se_name, | 1867 | .data = setclid->se_name, |
@@ -1677,10 +1887,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
1677 | strhashval = clientstr_hashval(dname); | 1887 | strhashval = clientstr_hashval(dname); |
1678 | 1888 | ||
1679 | nfs4_lock_state(); | 1889 | nfs4_lock_state(); |
1680 | conf = find_confirmed_client_by_str(dname, strhashval, false); | 1890 | conf = find_confirmed_client_by_str(dname, strhashval); |
1681 | if (conf) { | 1891 | if (conf) { |
1682 | /* RFC 3530 14.2.33 CASE 0: */ | 1892 | /* RFC 3530 14.2.33 CASE 0: */ |
1683 | status = nfserr_clid_inuse; | 1893 | status = nfserr_clid_inuse; |
1894 | if (clp_used_exchangeid(conf)) | ||
1895 | goto out; | ||
1684 | if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { | 1896 | if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { |
1685 | char addr_str[INET6_ADDRSTRLEN]; | 1897 | char addr_str[INET6_ADDRSTRLEN]; |
1686 | rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, | 1898 | rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, |
@@ -1695,7 +1907,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
1695 | * has a description of SETCLIENTID request processing consisting | 1907 | * has a description of SETCLIENTID request processing consisting |
1696 | * of 5 bullet points, labeled as CASE0 - CASE4 below. | 1908 | * of 5 bullet points, labeled as CASE0 - CASE4 below. |
1697 | */ | 1909 | */ |
1698 | unconf = find_unconfirmed_client_by_str(dname, strhashval, false); | 1910 | unconf = find_unconfirmed_client_by_str(dname, strhashval); |
1699 | status = nfserr_resource; | 1911 | status = nfserr_resource; |
1700 | if (!conf) { | 1912 | if (!conf) { |
1701 | /* | 1913 | /* |
@@ -1747,7 +1959,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
1747 | goto out; | 1959 | goto out; |
1748 | gen_clid(new); | 1960 | gen_clid(new); |
1749 | } | 1961 | } |
1750 | gen_callback(new, setclid, rpc_get_scope_id(sa)); | 1962 | /* |
1963 | * XXX: we should probably set this at creation time, and check | ||
1964 | * for consistent minorversion use throughout: | ||
1965 | */ | ||
1966 | new->cl_minorversion = 0; | ||
1967 | gen_callback(new, setclid, rqstp); | ||
1751 | add_to_unconfirmed(new, strhashval); | 1968 | add_to_unconfirmed(new, strhashval); |
1752 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; | 1969 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; |
1753 | setclid->se_clientid.cl_id = new->cl_clientid.cl_id; | 1970 | setclid->se_clientid.cl_id = new->cl_clientid.cl_id; |
@@ -1806,8 +2023,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
1806 | if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) | 2023 | if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) |
1807 | status = nfserr_clid_inuse; | 2024 | status = nfserr_clid_inuse; |
1808 | else { | 2025 | else { |
1809 | atomic_set(&conf->cl_cb_set, 0); | 2026 | nfsd4_change_callback(conf, &unconf->cl_cb_conn); |
1810 | nfsd4_probe_callback(conf, &unconf->cl_cb_conn); | 2027 | nfsd4_probe_callback(conf); |
1811 | expire_client(unconf); | 2028 | expire_client(unconf); |
1812 | status = nfs_ok; | 2029 | status = nfs_ok; |
1813 | 2030 | ||
@@ -1834,14 +2051,14 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
1834 | unsigned int hash = | 2051 | unsigned int hash = |
1835 | clientstr_hashval(unconf->cl_recdir); | 2052 | clientstr_hashval(unconf->cl_recdir); |
1836 | conf = find_confirmed_client_by_str(unconf->cl_recdir, | 2053 | conf = find_confirmed_client_by_str(unconf->cl_recdir, |
1837 | hash, false); | 2054 | hash); |
1838 | if (conf) { | 2055 | if (conf) { |
1839 | nfsd4_remove_clid_dir(conf); | 2056 | nfsd4_remove_clid_dir(conf); |
1840 | expire_client(conf); | 2057 | expire_client(conf); |
1841 | } | 2058 | } |
1842 | move_to_confirmed(unconf); | 2059 | move_to_confirmed(unconf); |
1843 | conf = unconf; | 2060 | conf = unconf; |
1844 | nfsd4_probe_callback(conf, &conf->cl_cb_conn); | 2061 | nfsd4_probe_callback(conf); |
1845 | status = nfs_ok; | 2062 | status = nfs_ok; |
1846 | } | 2063 | } |
1847 | } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm))) | 2064 | } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm))) |
@@ -1877,6 +2094,7 @@ alloc_init_file(struct inode *ino) | |||
1877 | fp->fi_inode = igrab(ino); | 2094 | fp->fi_inode = igrab(ino); |
1878 | fp->fi_id = current_fileid++; | 2095 | fp->fi_id = current_fileid++; |
1879 | fp->fi_had_conflict = false; | 2096 | fp->fi_had_conflict = false; |
2097 | fp->fi_lease = NULL; | ||
1880 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); | 2098 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); |
1881 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); | 2099 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); |
1882 | spin_lock(&recall_lock); | 2100 | spin_lock(&recall_lock); |
@@ -2128,23 +2346,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) | |||
2128 | nfs4_file_put_access(fp, O_RDONLY); | 2346 | nfs4_file_put_access(fp, O_RDONLY); |
2129 | } | 2347 | } |
2130 | 2348 | ||
2131 | /* | 2349 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) |
2132 | * Spawn a thread to perform a recall on the delegation represented | ||
2133 | * by the lease (file_lock) | ||
2134 | * | ||
2135 | * Called from break_lease() with lock_kernel() held. | ||
2136 | * Note: we assume break_lease will only call this *once* for any given | ||
2137 | * lease. | ||
2138 | */ | ||
2139 | static | ||
2140 | void nfsd_break_deleg_cb(struct file_lock *fl) | ||
2141 | { | 2350 | { |
2142 | struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; | ||
2143 | |||
2144 | dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl); | ||
2145 | if (!dp) | ||
2146 | return; | ||
2147 | |||
2148 | /* We're assuming the state code never drops its reference | 2351 | /* We're assuming the state code never drops its reference |
2149 | * without first removing the lease. Since we're in this lease | 2352 | * without first removing the lease. Since we're in this lease |
2150 | * callback (and since the lease code is serialized by the kernel | 2353 | * callback (and since the lease code is serialized by the kernel |
@@ -2152,75 +2355,37 @@ void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2152 | * it's safe to take a reference: */ | 2355 | * it's safe to take a reference: */ |
2153 | atomic_inc(&dp->dl_count); | 2356 | atomic_inc(&dp->dl_count); |
2154 | 2357 | ||
2155 | spin_lock(&recall_lock); | ||
2156 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); | 2358 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); |
2157 | spin_unlock(&recall_lock); | ||
2158 | 2359 | ||
2159 | /* only place dl_time is set. protected by lock_kernel*/ | 2360 | /* only place dl_time is set. protected by lock_flocks*/ |
2160 | dp->dl_time = get_seconds(); | 2361 | dp->dl_time = get_seconds(); |
2161 | 2362 | ||
2162 | /* | ||
2163 | * We don't want the locks code to timeout the lease for us; | ||
2164 | * we'll remove it ourself if the delegation isn't returned | ||
2165 | * in time. | ||
2166 | */ | ||
2167 | fl->fl_break_time = 0; | ||
2168 | |||
2169 | dp->dl_file->fi_had_conflict = true; | ||
2170 | nfsd4_cb_recall(dp); | 2363 | nfsd4_cb_recall(dp); |
2171 | } | 2364 | } |
2172 | 2365 | ||
2173 | /* | 2366 | /* Called from break_lease() with lock_flocks() held. */ |
2174 | * The file_lock is being reaped. | 2367 | static void nfsd_break_deleg_cb(struct file_lock *fl) |
2175 | * | ||
2176 | * Called by locks_free_lock() with lock_kernel() held. | ||
2177 | */ | ||
2178 | static | ||
2179 | void nfsd_release_deleg_cb(struct file_lock *fl) | ||
2180 | { | ||
2181 | struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; | ||
2182 | |||
2183 | dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count)); | ||
2184 | |||
2185 | if (!(fl->fl_flags & FL_LEASE) || !dp) | ||
2186 | return; | ||
2187 | dp->dl_flock = NULL; | ||
2188 | } | ||
2189 | |||
2190 | /* | ||
2191 | * Set the delegation file_lock back pointer. | ||
2192 | * | ||
2193 | * Called from setlease() with lock_kernel() held. | ||
2194 | */ | ||
2195 | static | ||
2196 | void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl) | ||
2197 | { | ||
2198 | struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner; | ||
2199 | |||
2200 | dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp); | ||
2201 | if (!dp) | ||
2202 | return; | ||
2203 | dp->dl_flock = new; | ||
2204 | } | ||
2205 | |||
2206 | /* | ||
2207 | * Called from setlease() with lock_kernel() held | ||
2208 | */ | ||
2209 | static | ||
2210 | int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) | ||
2211 | { | 2368 | { |
2212 | struct nfs4_delegation *onlistd = | 2369 | struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; |
2213 | (struct nfs4_delegation *)onlist->fl_owner; | 2370 | struct nfs4_delegation *dp; |
2214 | struct nfs4_delegation *tryd = | ||
2215 | (struct nfs4_delegation *)try->fl_owner; | ||
2216 | 2371 | ||
2217 | if (onlist->fl_lmops != try->fl_lmops) | 2372 | BUG_ON(!fp); |
2218 | return 0; | 2373 | /* We assume break_lease is only called once per lease: */ |
2374 | BUG_ON(fp->fi_had_conflict); | ||
2375 | /* | ||
2376 | * We don't want the locks code to timeout the lease for us; | ||
2377 | * we'll remove it ourself if a delegation isn't returned | ||
2378 | * in time: | ||
2379 | */ | ||
2380 | fl->fl_break_time = 0; | ||
2219 | 2381 | ||
2220 | return onlistd->dl_client == tryd->dl_client; | 2382 | spin_lock(&recall_lock); |
2383 | fp->fi_had_conflict = true; | ||
2384 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) | ||
2385 | nfsd_break_one_deleg(dp); | ||
2386 | spin_unlock(&recall_lock); | ||
2221 | } | 2387 | } |
2222 | 2388 | ||
2223 | |||
2224 | static | 2389 | static |
2225 | int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) | 2390 | int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) |
2226 | { | 2391 | { |
@@ -2232,9 +2397,6 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) | |||
2232 | 2397 | ||
2233 | static const struct lock_manager_operations nfsd_lease_mng_ops = { | 2398 | static const struct lock_manager_operations nfsd_lease_mng_ops = { |
2234 | .fl_break = nfsd_break_deleg_cb, | 2399 | .fl_break = nfsd_break_deleg_cb, |
2235 | .fl_release_private = nfsd_release_deleg_cb, | ||
2236 | .fl_copy_lock = nfsd_copy_lock_deleg_cb, | ||
2237 | .fl_mylease = nfsd_same_client_deleg_cb, | ||
2238 | .fl_change = nfsd_change_deleg_cb, | 2400 | .fl_change = nfsd_change_deleg_cb, |
2239 | }; | 2401 | }; |
2240 | 2402 | ||
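The trimmed ops table above keeps only fl_break and fl_change. For readers less familiar with the VFS lease machinery these callbacks hook into, here is a minimal userspace sketch (not part of this patch) of what a lease break looks like to an ordinary lease holder; it assumes a file ./testfile owned by the caller and uses only the standard fcntl() lease API:

/* Userspace illustration only -- not part of this patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t lease_broken;

static void on_break(int sig)
{
	(void)sig;
	lease_broken = 1;	/* roughly the moment fl_break fires in the kernel */
}

int main(void)
{
	int fd = open("testfile", O_RDONLY);	/* assumed test file, owned by us */

	if (fd < 0)
		return 1;
	signal(SIGIO, on_break);		/* SIGIO is the default break signal */
	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)	/* take a read lease */
		return 1;
	while (!lease_broken)			/* wait for a conflicting open() */
		pause();
	fcntl(fd, F_SETLEASE, F_UNLCK);		/* release, like returning a delegation */
	printf("lease broken and released\n");
	close(fd);
	return 0;
}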
@@ -2314,14 +2476,17 @@ find_delegation_file(struct nfs4_file *fp, stateid_t *stid) | |||
2314 | { | 2476 | { |
2315 | struct nfs4_delegation *dp; | 2477 | struct nfs4_delegation *dp; |
2316 | 2478 | ||
2317 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { | 2479 | spin_lock(&recall_lock); |
2318 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) | 2480 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) |
2481 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { | ||
2482 | spin_unlock(&recall_lock); | ||
2319 | return dp; | 2483 | return dp; |
2320 | } | 2484 | } |
2485 | spin_unlock(&recall_lock); | ||
2321 | return NULL; | 2486 | return NULL; |
2322 | } | 2487 | } |
2323 | 2488 | ||
2324 | int share_access_to_flags(u32 share_access) | 2489 | static int share_access_to_flags(u32 share_access) |
2325 | { | 2490 | { |
2326 | share_access &= ~NFS4_SHARE_WANT_MASK; | 2491 | share_access &= ~NFS4_SHARE_WANT_MASK; |
2327 | 2492 | ||
@@ -2401,8 +2566,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file | |||
2401 | if (!fp->fi_fds[oflag]) { | 2566 | if (!fp->fi_fds[oflag]) { |
2402 | status = nfsd_open(rqstp, cur_fh, S_IFREG, access, | 2567 | status = nfsd_open(rqstp, cur_fh, S_IFREG, access, |
2403 | &fp->fi_fds[oflag]); | 2568 | &fp->fi_fds[oflag]); |
2404 | if (status == nfserr_dropit) | ||
2405 | status = nfserr_jukebox; | ||
2406 | if (status) | 2569 | if (status) |
2407 | return status; | 2570 | return status; |
2408 | } | 2571 | } |
@@ -2483,6 +2646,79 @@ nfs4_set_claim_prev(struct nfsd4_open *open) | |||
2483 | open->op_stateowner->so_client->cl_firststate = 1; | 2646 | open->op_stateowner->so_client->cl_firststate = 1; |
2484 | } | 2647 | } |
2485 | 2648 | ||
2649 | /* Should we give out recallable state?: */ | ||
2650 | static bool nfsd4_cb_channel_good(struct nfs4_client *clp) | ||
2651 | { | ||
2652 | if (clp->cl_cb_state == NFSD4_CB_UP) | ||
2653 | return true; | ||
2654 | /* | ||
2655 | * In the sessions case, since we don't have to establish a | ||
2656 | * separate connection for callbacks, we assume it's OK | ||
2657 | * until we hear otherwise: | ||
2658 | */ | ||
2659 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; | ||
2660 | } | ||
2661 | |||
2662 | static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) | ||
2663 | { | ||
2664 | struct file_lock *fl; | ||
2665 | |||
2666 | fl = locks_alloc_lock(); | ||
2667 | if (!fl) | ||
2668 | return NULL; | ||
2669 | locks_init_lock(fl); | ||
2670 | fl->fl_lmops = &nfsd_lease_mng_ops; | ||
2671 | fl->fl_flags = FL_LEASE; | ||
2672 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2673 | fl->fl_end = OFFSET_MAX; | ||
2674 | fl->fl_owner = (fl_owner_t)(dp->dl_file); | ||
2675 | fl->fl_pid = current->tgid; | ||
2676 | return fl; | ||
2677 | } | ||
2678 | |||
2679 | static int nfs4_setlease(struct nfs4_delegation *dp, int flag) | ||
2680 | { | ||
2681 | struct nfs4_file *fp = dp->dl_file; | ||
2682 | struct file_lock *fl; | ||
2683 | int status; | ||
2684 | |||
2685 | fl = nfs4_alloc_init_lease(dp, flag); | ||
2686 | if (!fl) | ||
2687 | return -ENOMEM; | ||
2688 | fl->fl_file = find_readable_file(fp); | ||
2689 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2690 | status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); | ||
2691 | if (status) { | ||
2692 | list_del_init(&dp->dl_perclnt); | ||
2693 | locks_free_lock(fl); | ||
2694 | return -ENOMEM; | ||
2695 | } | ||
2696 | fp->fi_lease = fl; | ||
2697 | fp->fi_deleg_file = fl->fl_file; | ||
2698 | get_file(fp->fi_deleg_file); | ||
2699 | atomic_set(&fp->fi_delegees, 1); | ||
2700 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2701 | return 0; | ||
2702 | } | ||
2703 | |||
2704 | static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) | ||
2705 | { | ||
2706 | struct nfs4_file *fp = dp->dl_file; | ||
2707 | |||
2708 | if (!fp->fi_lease) | ||
2709 | return nfs4_setlease(dp, flag); | ||
2710 | spin_lock(&recall_lock); | ||
2711 | if (fp->fi_had_conflict) { | ||
2712 | spin_unlock(&recall_lock); | ||
2713 | return -EAGAIN; | ||
2714 | } | ||
2715 | atomic_inc(&fp->fi_delegees); | ||
2716 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2717 | spin_unlock(&recall_lock); | ||
2718 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2719 | return 0; | ||
2720 | } | ||
2721 | |||
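nfs4_setlease() installs a single VFS lease per nfs4_file, and nfs4_set_delegation() lets later delegations share it by bumping fi_delegees, refusing with -EAGAIN once a break has been seen. The fragment below is an illustrative, simplified sketch of that sharing pattern (invented names, a mutex standing in for recall_lock, and the lock held across the install step, which the kernel code does not need to do):

/* Illustrative sketch only -- invented names, not kernel code. */
#include <pthread.h>
#include <stdbool.h>

struct file_state {
	pthread_mutex_t lock;		/* stands in for recall_lock */
	bool lease_installed;		/* fi_lease != NULL */
	bool had_conflict;		/* fi_had_conflict */
	int delegees;			/* fi_delegees */
};

/* Returns 0 on success, -1 in the -EAGAIN case (break already seen). */
static int add_delegation(struct file_state *fs)
{
	pthread_mutex_lock(&fs->lock);
	if (!fs->lease_installed) {
		/* first delegation on this file: install the one lease */
		fs->lease_installed = true;
		fs->had_conflict = false;
		fs->delegees = 1;
		pthread_mutex_unlock(&fs->lock);
		return 0;
	}
	if (fs->had_conflict) {
		/* a break already happened; hand out no more state */
		pthread_mutex_unlock(&fs->lock);
		return -1;
	}
	fs->delegees++;			/* share the existing lease */
	pthread_mutex_unlock(&fs->lock);
	return 0;
}

/* The break side flips the flag under the same lock, mirroring
 * nfsd_break_deleg_cb() taking recall_lock before walking fi_delegations. */
static void break_lease_sketch(struct file_state *fs)
{
	pthread_mutex_lock(&fs->lock);
	fs->had_conflict = true;
	pthread_mutex_unlock(&fs->lock);
}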
2486 | /* | 2722 | /* |
2487 | * Attempt to hand out a delegation. | 2723 | * Attempt to hand out a delegation. |
2488 | */ | 2724 | */ |
@@ -2491,10 +2727,10 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2491 | { | 2727 | { |
2492 | struct nfs4_delegation *dp; | 2728 | struct nfs4_delegation *dp; |
2493 | struct nfs4_stateowner *sop = stp->st_stateowner; | 2729 | struct nfs4_stateowner *sop = stp->st_stateowner; |
2494 | int cb_up = atomic_read(&sop->so_client->cl_cb_set); | 2730 | int cb_up; |
2495 | struct file_lock fl, *flp = &fl; | ||
2496 | int status, flag = 0; | 2731 | int status, flag = 0; |
2497 | 2732 | ||
2733 | cb_up = nfsd4_cb_channel_good(sop->so_client); | ||
2498 | flag = NFS4_OPEN_DELEGATE_NONE; | 2734 | flag = NFS4_OPEN_DELEGATE_NONE; |
2499 | open->op_recall = 0; | 2735 | open->op_recall = 0; |
2500 | switch (open->op_claim_type) { | 2736 | switch (open->op_claim_type) { |
@@ -2522,29 +2758,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2522 | } | 2758 | } |
2523 | 2759 | ||
2524 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); | 2760 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); |
2525 | if (dp == NULL) { | 2761 | if (dp == NULL) |
2526 | flag = NFS4_OPEN_DELEGATE_NONE; | 2762 | goto out_no_deleg; |
2527 | goto out; | 2763 | status = nfs4_set_delegation(dp, flag); |
2528 | } | 2764 | if (status) |
2529 | locks_init_lock(&fl); | 2765 | goto out_free; |
2530 | fl.fl_lmops = &nfsd_lease_mng_ops; | ||
2531 | fl.fl_flags = FL_LEASE; | ||
2532 | fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2533 | fl.fl_end = OFFSET_MAX; | ||
2534 | fl.fl_owner = (fl_owner_t)dp; | ||
2535 | fl.fl_file = find_readable_file(stp->st_file); | ||
2536 | BUG_ON(!fl.fl_file); | ||
2537 | fl.fl_pid = current->tgid; | ||
2538 | |||
2539 | /* vfs_setlease checks to see if delegation should be handed out. | ||
2540 | * the lock_manager callbacks fl_mylease and fl_change are used | ||
2541 | */ | ||
2542 | if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) { | ||
2543 | dprintk("NFSD: setlease failed [%d], no delegation\n", status); | ||
2544 | unhash_delegation(dp); | ||
2545 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2546 | goto out; | ||
2547 | } | ||
2548 | 2766 | ||
2549 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); | 2767 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); |
2550 | 2768 | ||
@@ -2556,6 +2774,12 @@ out: | |||
2556 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) | 2774 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) |
2557 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); | 2775 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); |
2558 | open->op_delegate_type = flag; | 2776 | open->op_delegate_type = flag; |
2777 | return; | ||
2778 | out_free: | ||
2779 | nfs4_put_delegation(dp); | ||
2780 | out_no_deleg: | ||
2781 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2782 | goto out; | ||
2559 | } | 2783 | } |
2560 | 2784 | ||
2561 | /* | 2785 | /* |
@@ -2674,7 +2898,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
2674 | renew_client(clp); | 2898 | renew_client(clp); |
2675 | status = nfserr_cb_path_down; | 2899 | status = nfserr_cb_path_down; |
2676 | if (!list_empty(&clp->cl_delegations) | 2900 | if (!list_empty(&clp->cl_delegations) |
2677 | && !atomic_read(&clp->cl_cb_set)) | 2901 | && clp->cl_cb_state != NFSD4_CB_UP) |
2678 | goto out; | 2902 | goto out; |
2679 | status = nfs_ok; | 2903 | status = nfs_ok; |
2680 | out: | 2904 | out: |
@@ -2682,7 +2906,7 @@ out: | |||
2682 | return status; | 2906 | return status; |
2683 | } | 2907 | } |
2684 | 2908 | ||
2685 | struct lock_manager nfsd4_manager = { | 2909 | static struct lock_manager nfsd4_manager = { |
2686 | }; | 2910 | }; |
2687 | 2911 | ||
2688 | static void | 2912 | static void |
@@ -2750,8 +2974,6 @@ nfs4_laundromat(void) | |||
2750 | test_val = u; | 2974 | test_val = u; |
2751 | break; | 2975 | break; |
2752 | } | 2976 | } |
2753 | dprintk("NFSD: purging unused delegation dp %p, fp %p\n", | ||
2754 | dp, dp->dl_flock); | ||
2755 | list_move(&dp->dl_recall_lru, &reaplist); | 2977 | list_move(&dp->dl_recall_lru, &reaplist); |
2756 | } | 2978 | } |
2757 | spin_unlock(&recall_lock); | 2979 | spin_unlock(&recall_lock); |
@@ -2861,7 +3083,7 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags) | |||
2861 | if (ONE_STATEID(stateid) && (flags & RD_STATE)) | 3083 | if (ONE_STATEID(stateid) && (flags & RD_STATE)) |
2862 | return nfs_ok; | 3084 | return nfs_ok; |
2863 | else if (locks_in_grace()) { | 3085 | else if (locks_in_grace()) { |
2864 | /* Answer in remaining cases depends on existance of | 3086 | /* Answer in remaining cases depends on existence of |
2865 | * conflicting state; so we must wait out the grace period. */ | 3087 | * conflicting state; so we must wait out the grace period. */ |
2866 | return nfserr_grace; | 3088 | return nfserr_grace; |
2867 | } else if (flags & WR_STATE) | 3089 | } else if (flags & WR_STATE) |
@@ -2944,7 +3166,11 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | |||
2944 | if (STALE_STATEID(stateid)) | 3166 | if (STALE_STATEID(stateid)) |
2945 | goto out; | 3167 | goto out; |
2946 | 3168 | ||
2947 | status = nfserr_bad_stateid; | 3169 | /* |
3170 | * We assume that any stateid that has the current boot time, | ||
3171 | * but that we can't find, is expired: | ||
3172 | */ | ||
3173 | status = nfserr_expired; | ||
2948 | if (is_delegation_stateid(stateid)) { | 3174 | if (is_delegation_stateid(stateid)) { |
2949 | dp = find_delegation_stateid(ino, stateid); | 3175 | dp = find_delegation_stateid(ino, stateid); |
2950 | if (!dp) | 3176 | if (!dp) |
@@ -2957,13 +3183,15 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | |||
2957 | if (status) | 3183 | if (status) |
2958 | goto out; | 3184 | goto out; |
2959 | renew_client(dp->dl_client); | 3185 | renew_client(dp->dl_client); |
2960 | if (filpp) | 3186 | if (filpp) { |
2961 | *filpp = find_readable_file(dp->dl_file); | 3187 | *filpp = dp->dl_file->fi_deleg_file; |
2962 | BUG_ON(!*filpp); | 3188 | BUG_ON(!*filpp); |
3189 | } | ||
2963 | } else { /* open or lock stateid */ | 3190 | } else { /* open or lock stateid */ |
2964 | stp = find_stateid(stateid, flags); | 3191 | stp = find_stateid(stateid, flags); |
2965 | if (!stp) | 3192 | if (!stp) |
2966 | goto out; | 3193 | goto out; |
3194 | status = nfserr_bad_stateid; | ||
2967 | if (nfs4_check_fh(current_fh, stp)) | 3195 | if (nfs4_check_fh(current_fh, stp)) |
2968 | goto out; | 3196 | goto out; |
2969 | if (!stp->st_stateowner->so_confirmed) | 3197 | if (!stp->st_stateowner->so_confirmed) |
@@ -3038,8 +3266,9 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, | |||
3038 | * a replayed close: | 3266 | * a replayed close: |
3039 | */ | 3267 | */ |
3040 | sop = search_close_lru(stateid->si_stateownerid, flags); | 3268 | sop = search_close_lru(stateid->si_stateownerid, flags); |
3269 | /* It's not stale; let's assume it's expired: */ | ||
3041 | if (sop == NULL) | 3270 | if (sop == NULL) |
3042 | return nfserr_bad_stateid; | 3271 | return nfserr_expired; |
3043 | *sopp = sop; | 3272 | *sopp = sop; |
3044 | goto check_replay; | 3273 | goto check_replay; |
3045 | } | 3274 | } |
@@ -3304,6 +3533,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3304 | status = nfserr_bad_stateid; | 3533 | status = nfserr_bad_stateid; |
3305 | if (!is_delegation_stateid(stateid)) | 3534 | if (!is_delegation_stateid(stateid)) |
3306 | goto out; | 3535 | goto out; |
3536 | status = nfserr_expired; | ||
3307 | dp = find_delegation_stateid(inode, stateid); | 3537 | dp = find_delegation_stateid(inode, stateid); |
3308 | if (!dp) | 3538 | if (!dp) |
3309 | goto out; | 3539 | goto out; |
@@ -3473,7 +3703,7 @@ find_lockstateowner_str(struct inode *inode, clientid_t *clid, | |||
3473 | /* | 3703 | /* |
3474 | * Alloc a lock owner structure. | 3704 | * Alloc a lock owner structure. |
3475 | * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has | 3705 | * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has |
3476 | * occured. | 3706 | * occurred. |
3477 | * | 3707 | * |
3478 | * strhashval = lock_ownerstr_hashval | 3708 | * strhashval = lock_ownerstr_hashval |
3479 | */ | 3709 | */ |
@@ -3534,6 +3764,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc | |||
3534 | stp->st_stateid.si_stateownerid = sop->so_id; | 3764 | stp->st_stateid.si_stateownerid = sop->so_id; |
3535 | stp->st_stateid.si_fileid = fp->fi_id; | 3765 | stp->st_stateid.si_fileid = fp->fi_id; |
3536 | stp->st_stateid.si_generation = 0; | 3766 | stp->st_stateid.si_generation = 0; |
3767 | stp->st_access_bmap = 0; | ||
3537 | stp->st_deny_bmap = open_stp->st_deny_bmap; | 3768 | stp->st_deny_bmap = open_stp->st_deny_bmap; |
3538 | stp->st_openstp = open_stp; | 3769 | stp->st_openstp = open_stp; |
3539 | 3770 | ||
@@ -3548,6 +3779,17 @@ check_lock_length(u64 offset, u64 length) | |||
3548 | LOFF_OVERFLOW(offset, length))); | 3779 | LOFF_OVERFLOW(offset, length))); |
3549 | } | 3780 | } |
3550 | 3781 | ||
3782 | static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access) | ||
3783 | { | ||
3784 | struct nfs4_file *fp = lock_stp->st_file; | ||
3785 | int oflag = nfs4_access_to_omode(access); | ||
3786 | |||
3787 | if (test_bit(access, &lock_stp->st_access_bmap)) | ||
3788 | return; | ||
3789 | nfs4_file_get_access(fp, oflag); | ||
3790 | __set_bit(access, &lock_stp->st_access_bmap); | ||
3791 | } | ||
3792 | |||
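get_lock_access() makes access acquisition idempotent per lock stateid: a bit in st_access_bmap records which share modes have already been charged against the file, so a repeated LOCK with the same mode takes no extra reference. A small illustrative sketch of the same pattern, with invented names, outside the kernel:

/* Illustrative sketch only -- invented names, not kernel code. */
enum lock_access { ACCESS_READ = 1, ACCESS_WRITE = 2 };

struct lock_state_sketch {
	unsigned long access_bmap;	/* like st_access_bmap */
	int file_refs[3];		/* stand-in for the per-mode fi_access counters */
};

static void get_lock_access_sketch(struct lock_state_sketch *st, enum lock_access access)
{
	if (st->access_bmap & (1UL << access))
		return;				/* this mode already charged */
	st->file_refs[access]++;		/* like nfs4_file_get_access() */
	st->access_bmap |= 1UL << access;	/* remember it for next time */
}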
3551 | /* | 3793 | /* |
3552 | * LOCK operation | 3794 | * LOCK operation |
3553 | */ | 3795 | */ |
@@ -3564,7 +3806,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3564 | struct file_lock conflock; | 3806 | struct file_lock conflock; |
3565 | __be32 status = 0; | 3807 | __be32 status = 0; |
3566 | unsigned int strhashval; | 3808 | unsigned int strhashval; |
3567 | unsigned int cmd; | ||
3568 | int err; | 3809 | int err; |
3569 | 3810 | ||
3570 | dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", | 3811 | dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", |
@@ -3646,22 +3887,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3646 | switch (lock->lk_type) { | 3887 | switch (lock->lk_type) { |
3647 | case NFS4_READ_LT: | 3888 | case NFS4_READ_LT: |
3648 | case NFS4_READW_LT: | 3889 | case NFS4_READW_LT: |
3649 | if (find_readable_file(lock_stp->st_file)) { | 3890 | filp = find_readable_file(lock_stp->st_file); |
3650 | nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ); | 3891 | if (filp) |
3651 | filp = find_readable_file(lock_stp->st_file); | 3892 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); |
3652 | } | ||
3653 | file_lock.fl_type = F_RDLCK; | 3893 | file_lock.fl_type = F_RDLCK; |
3654 | cmd = F_SETLK; | 3894 | break; |
3655 | break; | ||
3656 | case NFS4_WRITE_LT: | 3895 | case NFS4_WRITE_LT: |
3657 | case NFS4_WRITEW_LT: | 3896 | case NFS4_WRITEW_LT: |
3658 | if (find_writeable_file(lock_stp->st_file)) { | 3897 | filp = find_writeable_file(lock_stp->st_file); |
3659 | nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE); | 3898 | if (filp) |
3660 | filp = find_writeable_file(lock_stp->st_file); | 3899 | get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); |
3661 | } | ||
3662 | file_lock.fl_type = F_WRLCK; | 3900 | file_lock.fl_type = F_WRLCK; |
3663 | cmd = F_SETLK; | 3901 | break; |
3664 | break; | ||
3665 | default: | 3902 | default: |
3666 | status = nfserr_inval; | 3903 | status = nfserr_inval; |
3667 | goto out; | 3904 | goto out; |
@@ -3685,7 +3922,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3685 | * Note: locks.c uses the BKL to protect the inode's lock list. | 3922 | * Note: locks.c uses the BKL to protect the inode's lock list. |
3686 | */ | 3923 | */ |
3687 | 3924 | ||
3688 | err = vfs_lock_file(filp, cmd, &file_lock, &conflock); | 3925 | err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); |
3689 | switch (-err) { | 3926 | switch (-err) { |
3690 | case 0: /* success! */ | 3927 | case 0: /* success! */ |
3691 | update_stateid(&lock_stp->st_stateid); | 3928 | update_stateid(&lock_stp->st_stateid); |
@@ -3895,7 +4132,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner) | |||
3895 | struct inode *inode = filp->fi_inode; | 4132 | struct inode *inode = filp->fi_inode; |
3896 | int status = 0; | 4133 | int status = 0; |
3897 | 4134 | ||
3898 | lock_kernel(); | 4135 | lock_flocks(); |
3899 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { | 4136 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { |
3900 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { | 4137 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { |
3901 | status = 1; | 4138 | status = 1; |
@@ -3903,7 +4140,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner) | |||
3903 | } | 4140 | } |
3904 | } | 4141 | } |
3905 | out: | 4142 | out: |
3906 | unlock_kernel(); | 4143 | unlock_flocks(); |
3907 | return status; | 4144 | return status; |
3908 | } | 4145 | } |
3909 | 4146 | ||
@@ -3980,7 +4217,7 @@ nfs4_has_reclaimed_state(const char *name, bool use_exchange_id) | |||
3980 | unsigned int strhashval = clientstr_hashval(name); | 4217 | unsigned int strhashval = clientstr_hashval(name); |
3981 | struct nfs4_client *clp; | 4218 | struct nfs4_client *clp; |
3982 | 4219 | ||
3983 | clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id); | 4220 | clp = find_confirmed_client_by_str(name, strhashval); |
3984 | return clp ? 1 : 0; | 4221 | return clp ? 1 : 0; |
3985 | } | 4222 | } |
3986 | 4223 | ||
@@ -4209,7 +4446,7 @@ __nfs4_state_shutdown(void) | |||
4209 | void | 4446 | void |
4210 | nfs4_state_shutdown(void) | 4447 | nfs4_state_shutdown(void) |
4211 | { | 4448 | { |
4212 | cancel_rearming_delayed_workqueue(laundry_wq, &laundromat_work); | 4449 | cancel_delayed_work_sync(&laundromat_work); |
4213 | destroy_workqueue(laundry_wq); | 4450 | destroy_workqueue(laundry_wq); |
4214 | locks_end_grace(&nfsd4_manager); | 4451 | locks_end_grace(&nfsd4_manager); |
4215 | nfs4_lock_state(); | 4452 | nfs4_lock_state(); |