Diffstat (limited to 'fs/nfsd/nfs4state.c')
-rw-r--r-- | fs/nfsd/nfs4state.c | 553
1 file changed, 340 insertions, 213 deletions
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index cf0d2ffb3c84..ad2bfa68d534 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -33,7 +33,7 @@
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/file.h> | 35 | #include <linux/file.h> |
36 | #include <linux/smp_lock.h> | 36 | #include <linux/fs.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/namei.h> | 38 | #include <linux/namei.h> |
39 | #include <linux/swap.h> | 39 | #include <linux/swap.h> |
@@ -207,7 +207,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
207 | { | 207 | { |
208 | struct nfs4_delegation *dp; | 208 | struct nfs4_delegation *dp; |
209 | struct nfs4_file *fp = stp->st_file; | 209 | struct nfs4_file *fp = stp->st_file; |
210 | struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn; | ||
211 | 210 | ||
212 | dprintk("NFSD alloc_init_deleg\n"); | 211 | dprintk("NFSD alloc_init_deleg\n"); |
213 | /* | 212 | /* |
@@ -234,7 +233,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
234 | nfs4_file_get_access(fp, O_RDONLY); | 233 | nfs4_file_get_access(fp, O_RDONLY); |
235 | dp->dl_flock = NULL; | 234 | dp->dl_flock = NULL; |
236 | dp->dl_type = type; | 235 | dp->dl_type = type; |
237 | dp->dl_ident = cb->cb_ident; | ||
238 | dp->dl_stateid.si_boot = boot_time; | 236 | dp->dl_stateid.si_boot = boot_time; |
239 | dp->dl_stateid.si_stateownerid = current_delegid++; | 237 | dp->dl_stateid.si_stateownerid = current_delegid++; |
240 | dp->dl_stateid.si_fileid = 0; | 238 | dp->dl_stateid.si_fileid = 0; |
@@ -535,171 +533,262 @@ gen_sessionid(struct nfsd4_session *ses)
535 | */ | 533 | */ |
536 | #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) | 534 | #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) |
537 | 535 | ||
536 | static void | ||
537 | free_session_slots(struct nfsd4_session *ses) | ||
538 | { | ||
539 | int i; | ||
540 | |||
541 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) | ||
542 | kfree(ses->se_slots[i]); | ||
543 | } | ||
544 | |||
538 | /* | 545 | /* |
539 | * Give the client the number of ca_maxresponsesize_cached slots it | 546 | * We don't actually need to cache the rpc and session headers, so we |
540 | * requests, of size bounded by NFSD_SLOT_CACHE_SIZE, | 547 | * can allocate a little less for each slot: |
541 | * NFSD_MAX_MEM_PER_SESSION, and nfsd_drc_max_mem. Do not allow more | 548 | */ |
542 | * than NFSD_MAX_SLOTS_PER_SESSION. | 549 | static inline int slot_bytes(struct nfsd4_channel_attrs *ca) |
543 | * | 550 | { |
544 | * If we run out of reserved DRC memory we should (up to a point) | 551 | return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; |
552 | } | ||
553 | |||
554 | static int nfsd4_sanitize_slot_size(u32 size) | ||
555 | { | ||
556 | size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */ | ||
557 | size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE); | ||
558 | |||
559 | return size; | ||
560 | } | ||
561 | |||
562 | /* | ||
563 | * XXX: If we run out of reserved DRC memory we could (up to a point) | ||
545 | * re-negotiate active sessions and reduce their slot usage to make | 564 | * re-negotiate active sessions and reduce their slot usage to make |
546 | * rooom for new connections. For now we just fail the create session. | 565 | * rooom for new connections. For now we just fail the create session. |
547 | */ | 566 | */ |
548 | static int set_forechannel_drc_size(struct nfsd4_channel_attrs *fchan) | 567 | static int nfsd4_get_drc_mem(int slotsize, u32 num) |
549 | { | 568 | { |
550 | int mem, size = fchan->maxresp_cached; | 569 | int avail; |
551 | 570 | ||
552 | if (fchan->maxreqs < 1) | 571 | num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION); |
553 | return nfserr_inval; | ||
554 | 572 | ||
555 | if (size < NFSD_MIN_HDR_SEQ_SZ) | 573 | spin_lock(&nfsd_drc_lock); |
556 | size = NFSD_MIN_HDR_SEQ_SZ; | 574 | avail = min_t(int, NFSD_MAX_MEM_PER_SESSION, |
557 | size -= NFSD_MIN_HDR_SEQ_SZ; | 575 | nfsd_drc_max_mem - nfsd_drc_mem_used); |
558 | if (size > NFSD_SLOT_CACHE_SIZE) | 576 | num = min_t(int, num, avail / slotsize); |
559 | size = NFSD_SLOT_CACHE_SIZE; | 577 | nfsd_drc_mem_used += num * slotsize; |
560 | 578 | spin_unlock(&nfsd_drc_lock); | |
561 | /* bound the maxreqs by NFSD_MAX_MEM_PER_SESSION */ | 579 | |
562 | mem = fchan->maxreqs * size; | 580 | return num; |
563 | if (mem > NFSD_MAX_MEM_PER_SESSION) { | 581 | } |
564 | fchan->maxreqs = NFSD_MAX_MEM_PER_SESSION / size; | ||
565 | if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION) | ||
566 | fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION; | ||
567 | mem = fchan->maxreqs * size; | ||
568 | } | ||
569 | 582 | ||
583 | static void nfsd4_put_drc_mem(int slotsize, int num) | ||
584 | { | ||
570 | spin_lock(&nfsd_drc_lock); | 585 | spin_lock(&nfsd_drc_lock); |
571 | /* bound the total session drc memory ussage */ | 586 | nfsd_drc_mem_used -= slotsize * num; |
572 | if (mem + nfsd_drc_mem_used > nfsd_drc_max_mem) { | ||
573 | fchan->maxreqs = (nfsd_drc_max_mem - nfsd_drc_mem_used) / size; | ||
574 | mem = fchan->maxreqs * size; | ||
575 | } | ||
576 | nfsd_drc_mem_used += mem; | ||
577 | spin_unlock(&nfsd_drc_lock); | 587 | spin_unlock(&nfsd_drc_lock); |
588 | } | ||
578 | 589 | ||
579 | if (fchan->maxreqs == 0) | 590 | static struct nfsd4_session *alloc_session(int slotsize, int numslots) |
580 | return nfserr_jukebox; | 591 | { |
592 | struct nfsd4_session *new; | ||
593 | int mem, i; | ||
581 | 594 | ||
582 | fchan->maxresp_cached = size + NFSD_MIN_HDR_SEQ_SZ; | 595 | BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *) |
583 | return 0; | 596 | + sizeof(struct nfsd4_session) > PAGE_SIZE); |
597 | mem = numslots * sizeof(struct nfsd4_slot *); | ||
598 | |||
599 | new = kzalloc(sizeof(*new) + mem, GFP_KERNEL); | ||
600 | if (!new) | ||
601 | return NULL; | ||
602 | /* allocate each struct nfsd4_slot and data cache in one piece */ | ||
603 | for (i = 0; i < numslots; i++) { | ||
604 | mem = sizeof(struct nfsd4_slot) + slotsize; | ||
605 | new->se_slots[i] = kzalloc(mem, GFP_KERNEL); | ||
606 | if (!new->se_slots[i]) | ||
607 | goto out_free; | ||
608 | } | ||
609 | return new; | ||
610 | out_free: | ||
611 | while (i--) | ||
612 | kfree(new->se_slots[i]); | ||
613 | kfree(new); | ||
614 | return NULL; | ||
584 | } | 615 | } |
585 | 616 | ||
586 | /* | 617 | static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize) |
587 | * fchan holds the client values on input, and the server values on output | ||
588 | * sv_max_mesg is the maximum payload plus one page for overhead. | ||
589 | */ | ||
590 | static int init_forechannel_attrs(struct svc_rqst *rqstp, | ||
591 | struct nfsd4_channel_attrs *session_fchan, | ||
592 | struct nfsd4_channel_attrs *fchan) | ||
593 | { | 618 | { |
594 | int status = 0; | 619 | u32 maxrpc = nfsd_serv->sv_max_mesg; |
595 | __u32 maxcount = nfsd_serv->sv_max_mesg; | ||
596 | 620 | ||
597 | /* headerpadsz set to zero in encode routine */ | 621 | new->maxreqs = numslots; |
622 | new->maxresp_cached = slotsize + NFSD_MIN_HDR_SEQ_SZ; | ||
623 | new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc); | ||
624 | new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc); | ||
625 | new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND); | ||
626 | } | ||
598 | 627 | ||
599 | /* Use the client's max request and max response size if possible */ | 628 | static void free_conn(struct nfsd4_conn *c) |
600 | if (fchan->maxreq_sz > maxcount) | 629 | { |
601 | fchan->maxreq_sz = maxcount; | 630 | svc_xprt_put(c->cn_xprt); |
602 | session_fchan->maxreq_sz = fchan->maxreq_sz; | 631 | kfree(c); |
632 | } | ||
603 | 633 | ||
604 | if (fchan->maxresp_sz > maxcount) | 634 | static void nfsd4_conn_lost(struct svc_xpt_user *u) |
605 | fchan->maxresp_sz = maxcount; | 635 | { |
606 | session_fchan->maxresp_sz = fchan->maxresp_sz; | 636 | struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); |
637 | struct nfs4_client *clp = c->cn_session->se_client; | ||
607 | 638 | ||
608 | /* Use the client's maxops if possible */ | 639 | spin_lock(&clp->cl_lock); |
609 | if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND) | 640 | if (!list_empty(&c->cn_persession)) { |
610 | fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND; | 641 | list_del(&c->cn_persession); |
611 | session_fchan->maxops = fchan->maxops; | 642 | free_conn(c); |
643 | } | ||
644 | spin_unlock(&clp->cl_lock); | ||
645 | } | ||
612 | 646 | ||
613 | /* FIXME: Error means no more DRC pages so the server should | 647 | static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) |
614 | * recover pages from existing sessions. For now fail session | 648 | { |
615 | * creation. | 649 | struct nfsd4_conn *conn; |
616 | */ | ||
617 | status = set_forechannel_drc_size(fchan); | ||
618 | 650 | ||
619 | session_fchan->maxresp_cached = fchan->maxresp_cached; | 651 | conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); |
620 | session_fchan->maxreqs = fchan->maxreqs; | 652 | if (!conn) |
653 | return NULL; | ||
654 | svc_xprt_get(rqstp->rq_xprt); | ||
655 | conn->cn_xprt = rqstp->rq_xprt; | ||
656 | conn->cn_flags = flags; | ||
657 | INIT_LIST_HEAD(&conn->cn_xpt_user.list); | ||
658 | return conn; | ||
659 | } | ||
621 | 660 | ||
622 | dprintk("%s status %d\n", __func__, status); | 661 | static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) |
623 | return status; | 662 | { |
663 | conn->cn_session = ses; | ||
664 | list_add(&conn->cn_persession, &ses->se_conns); | ||
624 | } | 665 | } |
625 | 666 | ||
626 | static void | 667 | static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) |
627 | free_session_slots(struct nfsd4_session *ses) | ||
628 | { | 668 | { |
629 | int i; | 669 | struct nfs4_client *clp = ses->se_client; |
630 | 670 | ||
631 | for (i = 0; i < ses->se_fchannel.maxreqs; i++) | 671 | spin_lock(&clp->cl_lock); |
632 | kfree(ses->se_slots[i]); | 672 | __nfsd4_hash_conn(conn, ses); |
673 | spin_unlock(&clp->cl_lock); | ||
633 | } | 674 | } |
634 | 675 | ||
635 | /* | 676 | static int nfsd4_register_conn(struct nfsd4_conn *conn) |
636 | * We don't actually need to cache the rpc and session headers, so we | ||
637 | * can allocate a little less for each slot: | ||
638 | */ | ||
639 | static inline int slot_bytes(struct nfsd4_channel_attrs *ca) | ||
640 | { | 677 | { |
641 | return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; | 678 | conn->cn_xpt_user.callback = nfsd4_conn_lost; |
679 | return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); | ||
642 | } | 680 | } |
643 | 681 | ||
644 | static int | 682 | static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) |
645 | alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, | ||
646 | struct nfsd4_create_session *cses) | ||
647 | { | 683 | { |
648 | struct nfsd4_session *new, tmp; | 684 | struct nfsd4_conn *conn; |
649 | struct nfsd4_slot *sp; | 685 | u32 flags = NFS4_CDFC4_FORE; |
650 | int idx, slotsize, cachesize, i; | 686 | int ret; |
651 | int status; | ||
652 | 687 | ||
653 | memset(&tmp, 0, sizeof(tmp)); | 688 | if (ses->se_flags & SESSION4_BACK_CHAN) |
689 | flags |= NFS4_CDFC4_BACK; | ||
690 | conn = alloc_conn(rqstp, flags); | ||
691 | if (!conn) | ||
692 | return nfserr_jukebox; | ||
693 | nfsd4_hash_conn(conn, ses); | ||
694 | ret = nfsd4_register_conn(conn); | ||
695 | if (ret) | ||
696 | /* oops; xprt is already down: */ | ||
697 | nfsd4_conn_lost(&conn->cn_xpt_user); | ||
698 | return nfs_ok; | ||
699 | } | ||
654 | 700 | ||
655 | /* FIXME: For now, we just accept the client back channel attributes. */ | 701 | static void nfsd4_del_conns(struct nfsd4_session *s) |
656 | tmp.se_bchannel = cses->back_channel; | 702 | { |
657 | status = init_forechannel_attrs(rqstp, &tmp.se_fchannel, | 703 | struct nfs4_client *clp = s->se_client; |
658 | &cses->fore_channel); | 704 | struct nfsd4_conn *c; |
659 | if (status) | ||
660 | goto out; | ||
661 | 705 | ||
662 | BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot) | 706 | spin_lock(&clp->cl_lock); |
663 | + sizeof(struct nfsd4_session) > PAGE_SIZE); | 707 | while (!list_empty(&s->se_conns)) { |
708 | c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); | ||
709 | list_del_init(&c->cn_persession); | ||
710 | spin_unlock(&clp->cl_lock); | ||
664 | 711 | ||
665 | status = nfserr_jukebox; | 712 | unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); |
666 | /* allocate struct nfsd4_session and slot table pointers in one piece */ | 713 | free_conn(c); |
667 | slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot *); | ||
668 | new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL); | ||
669 | if (!new) | ||
670 | goto out; | ||
671 | 714 | ||
672 | memcpy(new, &tmp, sizeof(*new)); | 715 | spin_lock(&clp->cl_lock); |
716 | } | ||
717 | spin_unlock(&clp->cl_lock); | ||
718 | } | ||
673 | 719 | ||
674 | /* allocate each struct nfsd4_slot and data cache in one piece */ | 720 | void free_session(struct kref *kref) |
675 | cachesize = slot_bytes(&new->se_fchannel); | 721 | { |
676 | for (i = 0; i < new->se_fchannel.maxreqs; i++) { | 722 | struct nfsd4_session *ses; |
677 | sp = kzalloc(sizeof(*sp) + cachesize, GFP_KERNEL); | 723 | int mem; |
678 | if (!sp) | 724 | |
679 | goto out_free; | 725 | ses = container_of(kref, struct nfsd4_session, se_ref); |
680 | new->se_slots[i] = sp; | 726 | nfsd4_del_conns(ses); |
727 | spin_lock(&nfsd_drc_lock); | ||
728 | mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); | ||
729 | nfsd_drc_mem_used -= mem; | ||
730 | spin_unlock(&nfsd_drc_lock); | ||
731 | free_session_slots(ses); | ||
732 | kfree(ses); | ||
733 | } | ||
734 | |||
735 | static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses) | ||
736 | { | ||
737 | struct nfsd4_session *new; | ||
738 | struct nfsd4_channel_attrs *fchan = &cses->fore_channel; | ||
739 | int numslots, slotsize; | ||
740 | int status; | ||
741 | int idx; | ||
742 | |||
743 | /* | ||
744 | * Note decreasing slot size below client's request may | ||
745 | * make it difficult for client to function correctly, whereas | ||
746 | * decreasing the number of slots will (just?) affect | ||
747 | * performance. When short on memory we therefore prefer to | ||
748 | * decrease number of slots instead of their size. | ||
749 | */ | ||
750 | slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); | ||
751 | numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); | ||
752 | |||
753 | new = alloc_session(slotsize, numslots); | ||
754 | if (!new) { | ||
755 | nfsd4_put_drc_mem(slotsize, fchan->maxreqs); | ||
756 | return NULL; | ||
681 | } | 757 | } |
758 | init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize); | ||
682 | 759 | ||
683 | new->se_client = clp; | 760 | new->se_client = clp; |
684 | gen_sessionid(new); | 761 | gen_sessionid(new); |
685 | idx = hash_sessionid(&new->se_sessionid); | ||
686 | memcpy(clp->cl_sessionid.data, new->se_sessionid.data, | ||
687 | NFS4_MAX_SESSIONID_LEN); | ||
688 | 762 | ||
763 | INIT_LIST_HEAD(&new->se_conns); | ||
764 | |||
765 | new->se_cb_seq_nr = 1; | ||
689 | new->se_flags = cses->flags; | 766 | new->se_flags = cses->flags; |
767 | new->se_cb_prog = cses->callback_prog; | ||
690 | kref_init(&new->se_ref); | 768 | kref_init(&new->se_ref); |
769 | idx = hash_sessionid(&new->se_sessionid); | ||
691 | spin_lock(&client_lock); | 770 | spin_lock(&client_lock); |
692 | list_add(&new->se_hash, &sessionid_hashtbl[idx]); | 771 | list_add(&new->se_hash, &sessionid_hashtbl[idx]); |
693 | list_add(&new->se_perclnt, &clp->cl_sessions); | 772 | list_add(&new->se_perclnt, &clp->cl_sessions); |
694 | spin_unlock(&client_lock); | 773 | spin_unlock(&client_lock); |
695 | 774 | ||
696 | status = nfs_ok; | 775 | status = nfsd4_new_conn(rqstp, new); |
697 | out: | 776 | /* whoops: benny points out, status is ignored! (err, or bogus) */ |
698 | return status; | 777 | if (status) { |
699 | out_free: | 778 | free_session(&new->se_ref); |
700 | free_session_slots(new); | 779 | return NULL; |
701 | kfree(new); | 780 | } |
702 | goto out; | 781 | if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) { |
782 | struct sockaddr *sa = svc_addr(rqstp); | ||
783 | |||
784 | clp->cl_cb_session = new; | ||
785 | clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt; | ||
786 | svc_xprt_get(rqstp->rq_xprt); | ||
787 | rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); | ||
788 | clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); | ||
789 | nfsd4_probe_callback(clp); | ||
790 | } | ||
791 | return new; | ||
703 | } | 792 | } |
704 | 793 | ||
705 | /* caller must hold client_lock */ | 794 | /* caller must hold client_lock */ |
@@ -731,21 +820,6 @@ unhash_session(struct nfsd4_session *ses)
731 | list_del(&ses->se_perclnt); | 820 | list_del(&ses->se_perclnt); |
732 | } | 821 | } |
733 | 822 | ||
734 | void | ||
735 | free_session(struct kref *kref) | ||
736 | { | ||
737 | struct nfsd4_session *ses; | ||
738 | int mem; | ||
739 | |||
740 | ses = container_of(kref, struct nfsd4_session, se_ref); | ||
741 | spin_lock(&nfsd_drc_lock); | ||
742 | mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel); | ||
743 | nfsd_drc_mem_used -= mem; | ||
744 | spin_unlock(&nfsd_drc_lock); | ||
745 | free_session_slots(ses); | ||
746 | kfree(ses); | ||
747 | } | ||
748 | |||
749 | /* must be called under the client_lock */ | 823 | /* must be called under the client_lock */ |
750 | static inline void | 824 | static inline void |
751 | renew_client_locked(struct nfs4_client *clp) | 825 | renew_client_locked(struct nfs4_client *clp) |
@@ -812,6 +886,13 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
812 | static inline void | 886 | static inline void |
813 | free_client(struct nfs4_client *clp) | 887 | free_client(struct nfs4_client *clp) |
814 | { | 888 | { |
889 | while (!list_empty(&clp->cl_sessions)) { | ||
890 | struct nfsd4_session *ses; | ||
891 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | ||
892 | se_perclnt); | ||
893 | list_del(&ses->se_perclnt); | ||
894 | nfsd4_put_session(ses); | ||
895 | } | ||
815 | if (clp->cl_cred.cr_group_info) | 896 | if (clp->cl_cred.cr_group_info) |
816 | put_group_info(clp->cl_cred.cr_group_info); | 897 | put_group_info(clp->cl_cred.cr_group_info); |
817 | kfree(clp->cl_principal); | 898 | kfree(clp->cl_principal); |
@@ -838,15 +919,12 @@ release_session_client(struct nfsd4_session *session)
838 | static inline void | 919 | static inline void |
839 | unhash_client_locked(struct nfs4_client *clp) | 920 | unhash_client_locked(struct nfs4_client *clp) |
840 | { | 921 | { |
922 | struct nfsd4_session *ses; | ||
923 | |||
841 | mark_client_expired(clp); | 924 | mark_client_expired(clp); |
842 | list_del(&clp->cl_lru); | 925 | list_del(&clp->cl_lru); |
843 | while (!list_empty(&clp->cl_sessions)) { | 926 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) |
844 | struct nfsd4_session *ses; | 927 | list_del_init(&ses->se_hash); |
845 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | ||
846 | se_perclnt); | ||
847 | unhash_session(ses); | ||
848 | nfsd4_put_session(ses); | ||
849 | } | ||
850 | } | 928 | } |
851 | 929 | ||
852 | static void | 930 | static void |
@@ -875,7 +953,7 @@ expire_client(struct nfs4_client *clp)
875 | sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient); | 953 | sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient); |
876 | release_openowner(sop); | 954 | release_openowner(sop); |
877 | } | 955 | } |
878 | nfsd4_set_callback_client(clp, NULL); | 956 | nfsd4_shutdown_callback(clp); |
879 | if (clp->cl_cb_conn.cb_xprt) | 957 | if (clp->cl_cb_conn.cb_xprt) |
880 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); | 958 | svc_xprt_put(clp->cl_cb_conn.cb_xprt); |
881 | list_del(&clp->cl_idhash); | 959 | list_del(&clp->cl_idhash); |
@@ -960,6 +1038,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
960 | if (clp == NULL) | 1038 | if (clp == NULL) |
961 | return NULL; | 1039 | return NULL; |
962 | 1040 | ||
1041 | INIT_LIST_HEAD(&clp->cl_sessions); | ||
1042 | |||
963 | princ = svc_gss_principal(rqstp); | 1043 | princ = svc_gss_principal(rqstp); |
964 | if (princ) { | 1044 | if (princ) { |
965 | clp->cl_principal = kstrdup(princ, GFP_KERNEL); | 1045 | clp->cl_principal = kstrdup(princ, GFP_KERNEL); |
@@ -976,8 +1056,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
976 | INIT_LIST_HEAD(&clp->cl_strhash); | 1056 | INIT_LIST_HEAD(&clp->cl_strhash); |
977 | INIT_LIST_HEAD(&clp->cl_openowners); | 1057 | INIT_LIST_HEAD(&clp->cl_openowners); |
978 | INIT_LIST_HEAD(&clp->cl_delegations); | 1058 | INIT_LIST_HEAD(&clp->cl_delegations); |
979 | INIT_LIST_HEAD(&clp->cl_sessions); | ||
980 | INIT_LIST_HEAD(&clp->cl_lru); | 1059 | INIT_LIST_HEAD(&clp->cl_lru); |
1060 | spin_lock_init(&clp->cl_lock); | ||
1061 | INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); | ||
981 | clp->cl_time = get_seconds(); | 1062 | clp->cl_time = get_seconds(); |
982 | clear_bit(0, &clp->cl_cb_slot_busy); | 1063 | clear_bit(0, &clp->cl_cb_slot_busy); |
983 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); | 1064 | rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); |
@@ -986,7 +1067,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
986 | clp->cl_flavor = rqstp->rq_flavor; | 1067 | clp->cl_flavor = rqstp->rq_flavor; |
987 | copy_cred(&clp->cl_cred, &rqstp->rq_cred); | 1068 | copy_cred(&clp->cl_cred, &rqstp->rq_cred); |
988 | gen_confirm(clp); | 1069 | gen_confirm(clp); |
989 | 1070 | clp->cl_cb_session = NULL; | |
990 | return clp; | 1071 | return clp; |
991 | } | 1072 | } |
992 | 1073 | ||
@@ -1098,7 +1179,7 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval,
1098 | static void | 1179 | static void |
1099 | gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) | 1180 | gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) |
1100 | { | 1181 | { |
1101 | struct nfs4_cb_conn *cb = &clp->cl_cb_conn; | 1182 | struct nfs4_cb_conn *conn = &clp->cl_cb_conn; |
1102 | unsigned short expected_family; | 1183 | unsigned short expected_family; |
1103 | 1184 | ||
1104 | /* Currently, we only support tcp and tcp6 for the callback channel */ | 1185 | /* Currently, we only support tcp and tcp6 for the callback channel */ |
@@ -1111,24 +1192,23 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid)
1111 | else | 1192 | else |
1112 | goto out_err; | 1193 | goto out_err; |
1113 | 1194 | ||
1114 | cb->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, | 1195 | conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val, |
1115 | se->se_callback_addr_len, | 1196 | se->se_callback_addr_len, |
1116 | (struct sockaddr *) &cb->cb_addr, | 1197 | (struct sockaddr *)&conn->cb_addr, |
1117 | sizeof(cb->cb_addr)); | 1198 | sizeof(conn->cb_addr)); |
1118 | 1199 | ||
1119 | if (!cb->cb_addrlen || cb->cb_addr.ss_family != expected_family) | 1200 | if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) |
1120 | goto out_err; | 1201 | goto out_err; |
1121 | 1202 | ||
1122 | if (cb->cb_addr.ss_family == AF_INET6) | 1203 | if (conn->cb_addr.ss_family == AF_INET6) |
1123 | ((struct sockaddr_in6 *) &cb->cb_addr)->sin6_scope_id = scopeid; | 1204 | ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; |
1124 | 1205 | ||
1125 | cb->cb_minorversion = 0; | 1206 | conn->cb_prog = se->se_callback_prog; |
1126 | cb->cb_prog = se->se_callback_prog; | 1207 | conn->cb_ident = se->se_callback_ident; |
1127 | cb->cb_ident = se->se_callback_ident; | ||
1128 | return; | 1208 | return; |
1129 | out_err: | 1209 | out_err: |
1130 | cb->cb_addr.ss_family = AF_UNSPEC; | 1210 | conn->cb_addr.ss_family = AF_UNSPEC; |
1131 | cb->cb_addrlen = 0; | 1211 | conn->cb_addrlen = 0; |
1132 | dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " | 1212 | dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) " |
1133 | "will not receive delegations\n", | 1213 | "will not receive delegations\n", |
1134 | clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); | 1214 | clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); |
@@ -1415,7 +1495,9 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1415 | { | 1495 | { |
1416 | struct sockaddr *sa = svc_addr(rqstp); | 1496 | struct sockaddr *sa = svc_addr(rqstp); |
1417 | struct nfs4_client *conf, *unconf; | 1497 | struct nfs4_client *conf, *unconf; |
1498 | struct nfsd4_session *new; | ||
1418 | struct nfsd4_clid_slot *cs_slot = NULL; | 1499 | struct nfsd4_clid_slot *cs_slot = NULL; |
1500 | bool confirm_me = false; | ||
1419 | int status = 0; | 1501 | int status = 0; |
1420 | 1502 | ||
1421 | nfs4_lock_state(); | 1503 | nfs4_lock_state(); |
@@ -1438,7 +1520,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1438 | cs_slot->sl_seqid, cr_ses->seqid); | 1520 | cs_slot->sl_seqid, cr_ses->seqid); |
1439 | goto out; | 1521 | goto out; |
1440 | } | 1522 | } |
1441 | cs_slot->sl_seqid++; | ||
1442 | } else if (unconf) { | 1523 | } else if (unconf) { |
1443 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || | 1524 | if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || |
1444 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { | 1525 | !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { |
@@ -1451,25 +1532,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1451 | if (status) { | 1532 | if (status) { |
1452 | /* an unconfirmed replay returns misordered */ | 1533 | /* an unconfirmed replay returns misordered */ |
1453 | status = nfserr_seq_misordered; | 1534 | status = nfserr_seq_misordered; |
1454 | goto out_cache; | 1535 | goto out; |
1455 | } | 1536 | } |
1456 | 1537 | ||
1457 | cs_slot->sl_seqid++; /* from 0 to 1 */ | 1538 | confirm_me = true; |
1458 | move_to_confirmed(unconf); | ||
1459 | |||
1460 | if (cr_ses->flags & SESSION4_BACK_CHAN) { | ||
1461 | unconf->cl_cb_conn.cb_xprt = rqstp->rq_xprt; | ||
1462 | svc_xprt_get(rqstp->rq_xprt); | ||
1463 | rpc_copy_addr( | ||
1464 | (struct sockaddr *)&unconf->cl_cb_conn.cb_addr, | ||
1465 | sa); | ||
1466 | unconf->cl_cb_conn.cb_addrlen = svc_addr_len(sa); | ||
1467 | unconf->cl_cb_conn.cb_minorversion = | ||
1468 | cstate->minorversion; | ||
1469 | unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog; | ||
1470 | unconf->cl_cb_seq_nr = 1; | ||
1471 | nfsd4_probe_callback(unconf, &unconf->cl_cb_conn); | ||
1472 | } | ||
1473 | conf = unconf; | 1539 | conf = unconf; |
1474 | } else { | 1540 | } else { |
1475 | status = nfserr_stale_clientid; | 1541 | status = nfserr_stale_clientid; |
@@ -1477,22 +1543,30 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1477 | } | 1543 | } |
1478 | 1544 | ||
1479 | /* | 1545 | /* |
1546 | * XXX: we should probably set this at creation time, and check | ||
1547 | * for consistent minorversion use throughout: | ||
1548 | */ | ||
1549 | conf->cl_minorversion = 1; | ||
1550 | /* | ||
1480 | * We do not support RDMA or persistent sessions | 1551 | * We do not support RDMA or persistent sessions |
1481 | */ | 1552 | */ |
1482 | cr_ses->flags &= ~SESSION4_PERSIST; | 1553 | cr_ses->flags &= ~SESSION4_PERSIST; |
1483 | cr_ses->flags &= ~SESSION4_RDMA; | 1554 | cr_ses->flags &= ~SESSION4_RDMA; |
1484 | 1555 | ||
1485 | status = alloc_init_session(rqstp, conf, cr_ses); | 1556 | status = nfserr_jukebox; |
1486 | if (status) | 1557 | new = alloc_init_session(rqstp, conf, cr_ses); |
1558 | if (!new) | ||
1487 | goto out; | 1559 | goto out; |
1488 | 1560 | status = nfs_ok; | |
1489 | memcpy(cr_ses->sessionid.data, conf->cl_sessionid.data, | 1561 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, |
1490 | NFS4_MAX_SESSIONID_LEN); | 1562 | NFS4_MAX_SESSIONID_LEN); |
1563 | cs_slot->sl_seqid++; | ||
1491 | cr_ses->seqid = cs_slot->sl_seqid; | 1564 | cr_ses->seqid = cs_slot->sl_seqid; |
1492 | 1565 | ||
1493 | out_cache: | ||
1494 | /* cache solo and embedded create sessions under the state lock */ | 1566 | /* cache solo and embedded create sessions under the state lock */ |
1495 | nfsd4_cache_create_session(cr_ses, cs_slot, status); | 1567 | nfsd4_cache_create_session(cr_ses, cs_slot, status); |
1568 | if (confirm_me) | ||
1569 | move_to_confirmed(conf); | ||
1496 | out: | 1570 | out: |
1497 | nfs4_unlock_state(); | 1571 | nfs4_unlock_state(); |
1498 | dprintk("%s returns %d\n", __func__, ntohl(status)); | 1572 | dprintk("%s returns %d\n", __func__, ntohl(status)); |
@@ -1546,8 +1620,11 @@ nfsd4_destroy_session(struct svc_rqst *r,
1546 | 1620 | ||
1547 | nfs4_lock_state(); | 1621 | nfs4_lock_state(); |
1548 | /* wait for callbacks */ | 1622 | /* wait for callbacks */ |
1549 | nfsd4_set_callback_client(ses->se_client, NULL); | 1623 | nfsd4_shutdown_callback(ses->se_client); |
1550 | nfs4_unlock_state(); | 1624 | nfs4_unlock_state(); |
1625 | |||
1626 | nfsd4_del_conns(ses); | ||
1627 | |||
1551 | nfsd4_put_session(ses); | 1628 | nfsd4_put_session(ses); |
1552 | status = nfs_ok; | 1629 | status = nfs_ok; |
1553 | out: | 1630 | out: |
@@ -1555,6 +1632,40 @@ out:
1555 | return status; | 1632 | return status; |
1556 | } | 1633 | } |
1557 | 1634 | ||
1635 | static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) | ||
1636 | { | ||
1637 | struct nfsd4_conn *c; | ||
1638 | |||
1639 | list_for_each_entry(c, &s->se_conns, cn_persession) { | ||
1640 | if (c->cn_xprt == xpt) { | ||
1641 | return c; | ||
1642 | } | ||
1643 | } | ||
1644 | return NULL; | ||
1645 | } | ||
1646 | |||
1647 | static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) | ||
1648 | { | ||
1649 | struct nfs4_client *clp = ses->se_client; | ||
1650 | struct nfsd4_conn *c; | ||
1651 | int ret; | ||
1652 | |||
1653 | spin_lock(&clp->cl_lock); | ||
1654 | c = __nfsd4_find_conn(new->cn_xprt, ses); | ||
1655 | if (c) { | ||
1656 | spin_unlock(&clp->cl_lock); | ||
1657 | free_conn(new); | ||
1658 | return; | ||
1659 | } | ||
1660 | __nfsd4_hash_conn(new, ses); | ||
1661 | spin_unlock(&clp->cl_lock); | ||
1662 | ret = nfsd4_register_conn(new); | ||
1663 | if (ret) | ||
1664 | /* oops; xprt is already down: */ | ||
1665 | nfsd4_conn_lost(&new->cn_xpt_user); | ||
1666 | return; | ||
1667 | } | ||
1668 | |||
1558 | __be32 | 1669 | __be32 |
1559 | nfsd4_sequence(struct svc_rqst *rqstp, | 1670 | nfsd4_sequence(struct svc_rqst *rqstp, |
1560 | struct nfsd4_compound_state *cstate, | 1671 | struct nfsd4_compound_state *cstate, |
@@ -1563,11 +1674,20 @@ nfsd4_sequence(struct svc_rqst *rqstp,
1563 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | 1674 | struct nfsd4_compoundres *resp = rqstp->rq_resp; |
1564 | struct nfsd4_session *session; | 1675 | struct nfsd4_session *session; |
1565 | struct nfsd4_slot *slot; | 1676 | struct nfsd4_slot *slot; |
1677 | struct nfsd4_conn *conn; | ||
1566 | int status; | 1678 | int status; |
1567 | 1679 | ||
1568 | if (resp->opcnt != 1) | 1680 | if (resp->opcnt != 1) |
1569 | return nfserr_sequence_pos; | 1681 | return nfserr_sequence_pos; |
1570 | 1682 | ||
1683 | /* | ||
1684 | * Will be either used or freed by nfsd4_sequence_check_conn | ||
1685 | * below. | ||
1686 | */ | ||
1687 | conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); | ||
1688 | if (!conn) | ||
1689 | return nfserr_jukebox; | ||
1690 | |||
1571 | spin_lock(&client_lock); | 1691 | spin_lock(&client_lock); |
1572 | status = nfserr_badsession; | 1692 | status = nfserr_badsession; |
1573 | session = find_in_sessionid_hashtbl(&seq->sessionid); | 1693 | session = find_in_sessionid_hashtbl(&seq->sessionid); |
@@ -1599,6 +1719,9 @@ nfsd4_sequence(struct svc_rqst *rqstp,
1599 | if (status) | 1719 | if (status) |
1600 | goto out; | 1720 | goto out; |
1601 | 1721 | ||
1722 | nfsd4_sequence_check_conn(conn, session); | ||
1723 | conn = NULL; | ||
1724 | |||
1602 | /* Success! bump slot seqid */ | 1725 | /* Success! bump slot seqid */ |
1603 | slot->sl_inuse = true; | 1726 | slot->sl_inuse = true; |
1604 | slot->sl_seqid = seq->seqid; | 1727 | slot->sl_seqid = seq->seqid; |
@@ -1613,6 +1736,7 @@ out:
1613 | nfsd4_get_session(cstate->session); | 1736 | nfsd4_get_session(cstate->session); |
1614 | atomic_inc(&session->se_client->cl_refcount); | 1737 | atomic_inc(&session->se_client->cl_refcount); |
1615 | } | 1738 | } |
1739 | kfree(conn); | ||
1616 | spin_unlock(&client_lock); | 1740 | spin_unlock(&client_lock); |
1617 | dprintk("%s: return %d\n", __func__, ntohl(status)); | 1741 | dprintk("%s: return %d\n", __func__, ntohl(status)); |
1618 | return status; | 1742 | return status; |
@@ -1747,6 +1871,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1747 | goto out; | 1871 | goto out; |
1748 | gen_clid(new); | 1872 | gen_clid(new); |
1749 | } | 1873 | } |
1874 | /* | ||
1875 | * XXX: we should probably set this at creation time, and check | ||
1876 | * for consistent minorversion use throughout: | ||
1877 | */ | ||
1878 | new->cl_minorversion = 0; | ||
1750 | gen_callback(new, setclid, rpc_get_scope_id(sa)); | 1879 | gen_callback(new, setclid, rpc_get_scope_id(sa)); |
1751 | add_to_unconfirmed(new, strhashval); | 1880 | add_to_unconfirmed(new, strhashval); |
1752 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; | 1881 | setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; |
@@ -1807,7 +1936,8 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
1807 | status = nfserr_clid_inuse; | 1936 | status = nfserr_clid_inuse; |
1808 | else { | 1937 | else { |
1809 | atomic_set(&conf->cl_cb_set, 0); | 1938 | atomic_set(&conf->cl_cb_set, 0); |
1810 | nfsd4_probe_callback(conf, &unconf->cl_cb_conn); | 1939 | nfsd4_change_callback(conf, &unconf->cl_cb_conn); |
1940 | nfsd4_probe_callback(conf); | ||
1811 | expire_client(unconf); | 1941 | expire_client(unconf); |
1812 | status = nfs_ok; | 1942 | status = nfs_ok; |
1813 | 1943 | ||
@@ -1841,7 +1971,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
1841 | } | 1971 | } |
1842 | move_to_confirmed(unconf); | 1972 | move_to_confirmed(unconf); |
1843 | conf = unconf; | 1973 | conf = unconf; |
1844 | nfsd4_probe_callback(conf, &conf->cl_cb_conn); | 1974 | nfsd4_probe_callback(conf); |
1845 | status = nfs_ok; | 1975 | status = nfs_ok; |
1846 | } | 1976 | } |
1847 | } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm))) | 1977 | } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm))) |
@@ -2188,22 +2318,6 @@ void nfsd_release_deleg_cb(struct file_lock *fl)
2188 | } | 2318 | } |
2189 | 2319 | ||
2190 | /* | 2320 | /* |
2191 | * Set the delegation file_lock back pointer. | ||
2192 | * | ||
2193 | * Called from setlease() with lock_kernel() held. | ||
2194 | */ | ||
2195 | static | ||
2196 | void nfsd_copy_lock_deleg_cb(struct file_lock *new, struct file_lock *fl) | ||
2197 | { | ||
2198 | struct nfs4_delegation *dp = (struct nfs4_delegation *)new->fl_owner; | ||
2199 | |||
2200 | dprintk("NFSD: nfsd_copy_lock_deleg_cb: new fl %p dp %p\n", new, dp); | ||
2201 | if (!dp) | ||
2202 | return; | ||
2203 | dp->dl_flock = new; | ||
2204 | } | ||
2205 | |||
2206 | /* | ||
2207 | * Called from setlease() with lock_kernel() held | 2321 | * Called from setlease() with lock_kernel() held |
2208 | */ | 2322 | */ |
2209 | static | 2323 | static |
@@ -2233,7 +2347,6 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2233 | static const struct lock_manager_operations nfsd_lease_mng_ops = { | 2347 | static const struct lock_manager_operations nfsd_lease_mng_ops = { |
2234 | .fl_break = nfsd_break_deleg_cb, | 2348 | .fl_break = nfsd_break_deleg_cb, |
2235 | .fl_release_private = nfsd_release_deleg_cb, | 2349 | .fl_release_private = nfsd_release_deleg_cb, |
2236 | .fl_copy_lock = nfsd_copy_lock_deleg_cb, | ||
2237 | .fl_mylease = nfsd_same_client_deleg_cb, | 2350 | .fl_mylease = nfsd_same_client_deleg_cb, |
2238 | .fl_change = nfsd_change_deleg_cb, | 2351 | .fl_change = nfsd_change_deleg_cb, |
2239 | }; | 2352 | }; |
@@ -2492,7 +2605,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
2492 | struct nfs4_delegation *dp; | 2605 | struct nfs4_delegation *dp; |
2493 | struct nfs4_stateowner *sop = stp->st_stateowner; | 2606 | struct nfs4_stateowner *sop = stp->st_stateowner; |
2494 | int cb_up = atomic_read(&sop->so_client->cl_cb_set); | 2607 | int cb_up = atomic_read(&sop->so_client->cl_cb_set); |
2495 | struct file_lock fl, *flp = &fl; | 2608 | struct file_lock *fl; |
2496 | int status, flag = 0; | 2609 | int status, flag = 0; |
2497 | 2610 | ||
2498 | flag = NFS4_OPEN_DELEGATE_NONE; | 2611 | flag = NFS4_OPEN_DELEGATE_NONE; |
@@ -2526,21 +2639,28 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
2526 | flag = NFS4_OPEN_DELEGATE_NONE; | 2639 | flag = NFS4_OPEN_DELEGATE_NONE; |
2527 | goto out; | 2640 | goto out; |
2528 | } | 2641 | } |
2529 | locks_init_lock(&fl); | 2642 | status = -ENOMEM; |
2530 | fl.fl_lmops = &nfsd_lease_mng_ops; | 2643 | fl = locks_alloc_lock(); |
2531 | fl.fl_flags = FL_LEASE; | 2644 | if (!fl) |
2532 | fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | 2645 | goto out; |
2533 | fl.fl_end = OFFSET_MAX; | 2646 | locks_init_lock(fl); |
2534 | fl.fl_owner = (fl_owner_t)dp; | 2647 | fl->fl_lmops = &nfsd_lease_mng_ops; |
2535 | fl.fl_file = find_readable_file(stp->st_file); | 2648 | fl->fl_flags = FL_LEASE; |
2536 | BUG_ON(!fl.fl_file); | 2649 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; |
2537 | fl.fl_pid = current->tgid; | 2650 | fl->fl_end = OFFSET_MAX; |
2651 | fl->fl_owner = (fl_owner_t)dp; | ||
2652 | fl->fl_file = find_readable_file(stp->st_file); | ||
2653 | BUG_ON(!fl->fl_file); | ||
2654 | fl->fl_pid = current->tgid; | ||
2655 | dp->dl_flock = fl; | ||
2538 | 2656 | ||
2539 | /* vfs_setlease checks to see if delegation should be handed out. | 2657 | /* vfs_setlease checks to see if delegation should be handed out. |
2540 | * the lock_manager callbacks fl_mylease and fl_change are used | 2658 | * the lock_manager callbacks fl_mylease and fl_change are used |
2541 | */ | 2659 | */ |
2542 | if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) { | 2660 | if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { |
2543 | dprintk("NFSD: setlease failed [%d], no delegation\n", status); | 2661 | dprintk("NFSD: setlease failed [%d], no delegation\n", status); |
2662 | dp->dl_flock = NULL; | ||
2663 | locks_free_lock(fl); | ||
2544 | unhash_delegation(dp); | 2664 | unhash_delegation(dp); |
2545 | flag = NFS4_OPEN_DELEGATE_NONE; | 2665 | flag = NFS4_OPEN_DELEGATE_NONE; |
2546 | goto out; | 2666 | goto out; |
@@ -2944,7 +3064,11 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
2944 | if (STALE_STATEID(stateid)) | 3064 | if (STALE_STATEID(stateid)) |
2945 | goto out; | 3065 | goto out; |
2946 | 3066 | ||
2947 | status = nfserr_bad_stateid; | 3067 | /* |
3068 | * We assume that any stateid that has the current boot time, | ||
3069 | * but that we can't find, is expired: | ||
3070 | */ | ||
3071 | status = nfserr_expired; | ||
2948 | if (is_delegation_stateid(stateid)) { | 3072 | if (is_delegation_stateid(stateid)) { |
2949 | dp = find_delegation_stateid(ino, stateid); | 3073 | dp = find_delegation_stateid(ino, stateid); |
2950 | if (!dp) | 3074 | if (!dp) |
@@ -2964,6 +3088,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
2964 | stp = find_stateid(stateid, flags); | 3088 | stp = find_stateid(stateid, flags); |
2965 | if (!stp) | 3089 | if (!stp) |
2966 | goto out; | 3090 | goto out; |
3091 | status = nfserr_bad_stateid; | ||
2967 | if (nfs4_check_fh(current_fh, stp)) | 3092 | if (nfs4_check_fh(current_fh, stp)) |
2968 | goto out; | 3093 | goto out; |
2969 | if (!stp->st_stateowner->so_confirmed) | 3094 | if (!stp->st_stateowner->so_confirmed) |
@@ -3038,8 +3163,9 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3038 | * a replayed close: | 3163 | * a replayed close: |
3039 | */ | 3164 | */ |
3040 | sop = search_close_lru(stateid->si_stateownerid, flags); | 3165 | sop = search_close_lru(stateid->si_stateownerid, flags); |
3166 | /* It's not stale; let's assume it's expired: */ | ||
3041 | if (sop == NULL) | 3167 | if (sop == NULL) |
3042 | return nfserr_bad_stateid; | 3168 | return nfserr_expired; |
3043 | *sopp = sop; | 3169 | *sopp = sop; |
3044 | goto check_replay; | 3170 | goto check_replay; |
3045 | } | 3171 | } |
@@ -3304,6 +3430,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3304 | status = nfserr_bad_stateid; | 3430 | status = nfserr_bad_stateid; |
3305 | if (!is_delegation_stateid(stateid)) | 3431 | if (!is_delegation_stateid(stateid)) |
3306 | goto out; | 3432 | goto out; |
3433 | status = nfserr_expired; | ||
3307 | dp = find_delegation_stateid(inode, stateid); | 3434 | dp = find_delegation_stateid(inode, stateid); |
3308 | if (!dp) | 3435 | if (!dp) |
3309 | goto out; | 3436 | goto out; |
@@ -3895,7 +4022,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
3895 | struct inode *inode = filp->fi_inode; | 4022 | struct inode *inode = filp->fi_inode; |
3896 | int status = 0; | 4023 | int status = 0; |
3897 | 4024 | ||
3898 | lock_kernel(); | 4025 | lock_flocks(); |
3899 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { | 4026 | for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) { |
3900 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { | 4027 | if ((*flpp)->fl_owner == (fl_owner_t)lowner) { |
3901 | status = 1; | 4028 | status = 1; |
@@ -3903,7 +4030,7 @@ check_for_locks(struct nfs4_file *filp, struct nfs4_stateowner *lowner)
3903 | } | 4030 | } |
3904 | } | 4031 | } |
3905 | out: | 4032 | out: |
3906 | unlock_kernel(); | 4033 | unlock_flocks(); |
3907 | return status; | 4034 | return status; |
3908 | } | 4035 | } |
3909 | 4036 | ||