author    David Woodhouse <dwmw2@shinybook.infradead.org>  2005-07-02 08:39:09 -0400
committer David Woodhouse <dwmw2@shinybook.infradead.org>  2005-07-02 08:39:09 -0400
commit    d2f6409584e2c62ffad81690562330ff3bf4a458 (patch)
tree      3bdfb97d0b51be2f7f414f2107e97603c1206abb /fs/nfs/nfs4state.c
parent    e1b09eba2686eca94a3a188042b518df6044a3c1 (diff)
parent    4a89a04f1ee21a7c1f4413f1ad7dcfac50ff9b63 (diff)
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'fs/nfs/nfs4state.c')
-rw-r--r--  fs/nfs/nfs4state.c  193
1 file changed, 84 insertions(+), 109 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 231cebce3c87..afe587d82f1e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -46,24 +46,18 @@
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
 
+#include "nfs4_fs.h"
 #include "callback.h"
 #include "delegation.h"
 
 #define OPENOWNER_POOL_SIZE 8
 
-static DEFINE_SPINLOCK(state_spinlock);
-
-nfs4_stateid zero_stateid;
-
-#if 0
-nfs4_stateid one_stateid =
-	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-#endif
+const nfs4_stateid zero_stateid;
 
+static DEFINE_SPINLOCK(state_spinlock);
 static LIST_HEAD(nfs4_clientid_list);
 
 static void nfs4_recover_state(void *);
-extern void nfs4_renew_state(void *);
 
 void
 init_nfsv4_state(struct nfs_server *server)
@@ -116,6 +110,7 @@ nfs4_alloc_client(struct in_addr *addr)
 	INIT_LIST_HEAD(&clp->cl_superblocks);
 	init_waitqueue_head(&clp->cl_waitq);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
+	clp->cl_rpcclient = ERR_PTR(-EINVAL);
 	clp->cl_boot_time = CURRENT_TIME;
 	clp->cl_state = 1 << NFS4CLNT_OK;
 	return clp;
@@ -137,7 +132,7 @@ nfs4_free_client(struct nfs4_client *clp)
 	if (clp->cl_cred)
 		put_rpccred(clp->cl_cred);
 	nfs_idmap_delete(clp);
-	if (clp->cl_rpcclient)
+	if (!IS_ERR(clp->cl_rpcclient))
 		rpc_shutdown_client(clp->cl_rpcclient);
 	kfree(clp);
 	nfs_callback_down();
@@ -365,7 +360,7 @@ nfs4_alloc_open_state(void)
 	atomic_set(&state->count, 1);
 	INIT_LIST_HEAD(&state->lock_states);
 	init_MUTEX(&state->lock_sema);
-	rwlock_init(&state->state_lock);
+	spin_lock_init(&state->state_lock);
 	return state;
 }
 
@@ -547,16 +542,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 	return NULL;
 }
 
-struct nfs4_lock_state *
-nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
-{
-	struct nfs4_lock_state *lsp;
-	read_lock(&state->state_lock);
-	lsp = __nfs4_find_lock_state(state, fl_owner);
-	read_unlock(&state->state_lock);
-	return lsp;
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -573,14 +558,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 		return NULL;
 	lsp->ls_flags = 0;
 	lsp->ls_seqid = 0;	/* arbitrary */
-	lsp->ls_id = -1;
 	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_owner = fl_owner;
-	INIT_LIST_HEAD(&lsp->ls_locks);
 	spin_lock(&clp->cl_lock);
 	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
 	spin_unlock(&clp->cl_lock);
+	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
 }
 
@@ -590,121 +574,112 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
  *
  * The caller must be holding state->lock_sema and clp->cl_sem
  */
-struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 {
-	struct nfs4_lock_state * lsp;
+	struct nfs4_lock_state *lsp, *new = NULL;
 
-	lsp = nfs4_find_lock_state(state, owner);
-	if (lsp == NULL)
-		lsp = nfs4_alloc_lock_state(state, owner);
+	for(;;) {
+		spin_lock(&state->state_lock);
+		lsp = __nfs4_find_lock_state(state, owner);
+		if (lsp != NULL)
+			break;
+		if (new != NULL) {
+			new->ls_state = state;
+			list_add(&new->ls_locks, &state->lock_states);
+			set_bit(LK_STATE_IN_USE, &state->flags);
+			lsp = new;
+			new = NULL;
+			break;
+		}
+		spin_unlock(&state->state_lock);
+		new = nfs4_alloc_lock_state(state, owner);
+		if (new == NULL)
+			return NULL;
+	}
+	spin_unlock(&state->state_lock);
+	kfree(new);
 	return lsp;
 }
 
 /*
- * Byte-range lock aware utility to initialize the stateid of read/write
- * requests.
+ * Release reference to lock_state, and free it if we see that
+ * it is no longer in use
  */
-void
-nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
-	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
-		struct nfs4_lock_state *lsp;
+	struct nfs4_state *state;
 
-		lsp = nfs4_find_lock_state(state, fl_owner);
-		if (lsp) {
-			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
-			nfs4_put_lock_state(lsp);
-			return;
-		}
-	}
-	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (lsp == NULL)
+		return;
+	state = lsp->ls_state;
+	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+		return;
+	list_del(&lsp->ls_locks);
+	if (list_empty(&state->lock_states))
+		clear_bit(LK_STATE_IN_USE, &state->flags);
+	spin_unlock(&state->state_lock);
+	kfree(lsp);
 }
 
-/*
- * Called with state->lock_sema and clp->cl_sem held.
- */
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
-	if (status == NFS_OK || seqid_mutating_err(-status))
-		lsp->ls_seqid++;
-}
+	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 
-/*
- * Check to see if the request lock (type FL_UNLK) effects the fl lock.
- *
- * fl and request must have the same posix owner
- *
- * return:
- * 0 -> fl not effected by request
- * 1 -> fl consumed by request
- */
+	dst->fl_u.nfs4_fl.owner = lsp;
+	atomic_inc(&lsp->ls_count);
+}
 
-static int
-nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
+static void nfs4_fl_release_lock(struct file_lock *fl)
 {
-	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
-		return 1;
-	return 0;
+	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
 }
 
-/*
- * Post an initialized lock_state on the state->lock_states list.
- */
-void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+static struct file_lock_operations nfs4_fl_lock_ops = {
+	.fl_copy_lock = nfs4_fl_copy_lock,
+	.fl_release_private = nfs4_fl_release_lock,
+};
+
+int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 {
-	if (!list_empty(&lsp->ls_locks))
-		return;
-	atomic_inc(&lsp->ls_count);
-	write_lock(&state->state_lock);
-	list_add(&lsp->ls_locks, &state->lock_states);
-	set_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	struct nfs4_lock_state *lsp;
+
+	if (fl->fl_ops != NULL)
+		return 0;
+	lsp = nfs4_get_lock_state(state, fl->fl_owner);
+	if (lsp == NULL)
+		return -ENOMEM;
+	fl->fl_u.nfs4_fl.owner = lsp;
+	fl->fl_ops = &nfs4_fl_lock_ops;
+	return 0;
 }
 
 /*
- * to decide to 'reap' lock state:
- * 1) search i_flock for file_locks with fl.lock_state = to ls.
- * 2) determine if unlock will consume found lock.
- * if so, reap
- *
- * else, don't reap.
- *
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
  */
-void
-nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
 {
-	struct inode *inode = state->inode;
-	struct file_lock *fl;
+	struct nfs4_lock_state *lsp;
 
-	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-		if (!(fl->fl_flags & FL_POSIX))
-			continue;
-		if (fl->fl_owner != lsp->ls_owner)
-			continue;
-		/* Exit if we find at least one lock which is not consumed */
-		if (nfs4_check_unlock(fl,request) == 0)
-			return;
-	}
+	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
+		return;
 
-	write_lock(&state->state_lock);
-	list_del_init(&lsp->ls_locks);
-	if (list_empty(&state->lock_states))
-		clear_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	spin_lock(&state->state_lock);
+	lsp = __nfs4_find_lock_state(state, fl_owner);
+	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
+	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
 }
 
 /*
- * Release reference to lock_state, and free it if we see that
- * it is no longer in use
+ * Called with state->lock_sema and clp->cl_sem held.
  */
-void
-nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
 {
-	if (!atomic_dec_and_test(&lsp->ls_count))
-		return;
-	BUG_ON (!list_empty(&lsp->ls_locks));
-	kfree(lsp);
+	if (status == NFS_OK || seqid_mutating_err(-status))
+		lsp->ls_seqid++;
 }
 
 /*
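The rewritten nfs4_get_lock_state() above follows the standard find-or-insert discipline for allocations that may sleep: search the list under state_lock; on a miss, drop the lock, allocate, then retake the lock and search again, since another task may have inserted the same owner's lock state in the meantime; the loser of the race discards its spare allocation. A minimal pthreads sketch of the same pattern (every name here is illustrative, not from the kernel):

#include <pthread.h>
#include <stdlib.h>

struct entry {
	int owner;
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold table_lock, like __nfs4_find_lock_state(). */
static struct entry *find_locked(int owner)
{
	struct entry *e;

	for (e = head; e != NULL; e = e->next)
		if (e->owner == owner)
			return e;
	return NULL;
}

static struct entry *get_entry(int owner)
{
	struct entry *e, *new = NULL;

	for (;;) {
		pthread_mutex_lock(&table_lock);
		e = find_locked(owner);
		if (e != NULL)
			break;			/* found: maybe someone else inserted it */
		if (new != NULL) {		/* still missing: insert our pre-allocated entry */
			new->next = head;
			head = new;
			e = new;
			new = NULL;
			break;
		}
		pthread_mutex_unlock(&table_lock);
		new = malloc(sizeof(*new));	/* allocate outside the lock */
		if (new == NULL)
			return NULL;
		new->owner = owner;
		new->next = NULL;
	}
	pthread_mutex_unlock(&table_lock);
	free(new);	/* lost the race: discard the spare (no-op when NULL) */
	return e;
}

int main(void)
{
	struct entry *a = get_entry(42);
	struct entry *b = get_entry(42);

	return (a == b) ? 0 : 1;	/* same owner maps to the same entry */
}

The release side relies on the matching trick: the new nfs4_put_lock_state() uses atomic_dec_and_lock(), which acquires state_lock only when the reference count actually reaches zero, so unlinking ls_locks and freeing the structure happen under the same lock that guards the search.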