Diffstat (limited to 'fs/nfs/nfs4state.c')
 -rw-r--r--	fs/nfs/nfs4state.c | 178
 1 file changed, 79 insertions(+), 99 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 591ad1d51880..afe587d82f1e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -360,7 +360,7 @@ nfs4_alloc_open_state(void)
 	atomic_set(&state->count, 1);
 	INIT_LIST_HEAD(&state->lock_states);
 	init_MUTEX(&state->lock_sema);
-	rwlock_init(&state->state_lock);
+	spin_lock_init(&state->state_lock);
 	return state;
 }
 
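Worth noting before the next hunk: converting state_lock from an rwlock to a plain spinlock is not just a simplification. It is what lets the release path added later in this patch use atomic_dec_and_lock(), which works only with spinlocks. The put-side idiom this enables, excerpted from the new nfs4_put_lock_state() further down with comments added:

	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;			/* not the last reference; the lock was not taken */
	list_del(&lsp->ls_locks);	/* last reference; state_lock is now held */
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);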
@@ -542,16 +542,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 	return NULL;
 }
 
-struct nfs4_lock_state *
-nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
-{
-	struct nfs4_lock_state *lsp;
-	read_lock(&state->state_lock);
-	lsp = __nfs4_find_lock_state(state, fl_owner);
-	read_unlock(&state->state_lock);
-	return lsp;
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
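With this locked wrapper gone, every remaining caller open-codes the lookup inside a single state_lock critical section, so a found lock state cannot be torn down between lookup and use. The reworked nfs4_copy_stateid() later in this patch is the model; note that the nfs4_put_lock_state() after the unlock only balances if __nfs4_find_lock_state() takes a reference on the entry it returns, which its body (not shown in this diff) is assumed to do:

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);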
@@ -568,14 +558,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 		return NULL;
 	lsp->ls_flags = 0;
 	lsp->ls_seqid = 0;	/* arbitrary */
-	lsp->ls_id = -1;
 	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_owner = fl_owner;
-	INIT_LIST_HEAD(&lsp->ls_locks);
 	spin_lock(&clp->cl_lock);
 	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
 	spin_unlock(&clp->cl_lock);
+	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
 }
 
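The dropped ls_id = -1 pre-initialization was a dead store, since ls_id is unconditionally assigned under clp->cl_lock a few lines later. Only that id allocation needs the lock: until nfs4_get_lock_state() links the object into state->lock_states, no other thread can see it. A minimal sketch of the idiom, with a hypothetical per-client counter standing in for nfs4_alloc_lockowner_id():

	/* Illustrative only: cl_lockowner_id is a made-up field name. */
	spin_lock(&clp->cl_lock);
	lsp->ls_id = clp->cl_lockowner_id++;	/* unique id handed out under cl_lock */
	spin_unlock(&clp->cl_lock);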
@@ -585,121 +574,112 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
  *
  * The caller must be holding state->lock_sema and clp->cl_sem
  */
-struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 {
-	struct nfs4_lock_state * lsp;
+	struct nfs4_lock_state *lsp, *new = NULL;
 
-	lsp = nfs4_find_lock_state(state, owner);
-	if (lsp == NULL)
-		lsp = nfs4_alloc_lock_state(state, owner);
+	for(;;) {
+		spin_lock(&state->state_lock);
+		lsp = __nfs4_find_lock_state(state, owner);
+		if (lsp != NULL)
+			break;
+		if (new != NULL) {
+			new->ls_state = state;
+			list_add(&new->ls_locks, &state->lock_states);
+			set_bit(LK_STATE_IN_USE, &state->flags);
+			lsp = new;
+			new = NULL;
+			break;
+		}
+		spin_unlock(&state->state_lock);
+		new = nfs4_alloc_lock_state(state, owner);
+		if (new == NULL)
+			return NULL;
+	}
+	spin_unlock(&state->state_lock);
+	kfree(new);
 	return lsp;
 }
 
 /*
- * Byte-range lock aware utility to initialize the stateid of read/write
- * requests.
+ * Release reference to lock_state, and free it if we see that
+ * it is no longer in use
  */
-void
-nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
-	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
-		struct nfs4_lock_state *lsp;
+	struct nfs4_state *state;
 
-		lsp = nfs4_find_lock_state(state, fl_owner);
-		if (lsp) {
-			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
-			nfs4_put_lock_state(lsp);
-			return;
-		}
-	}
-	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (lsp == NULL)
+		return;
+	state = lsp->ls_state;
+	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+		return;
+	list_del(&lsp->ls_locks);
+	if (list_empty(&state->lock_states))
+		clear_bit(LK_STATE_IN_USE, &state->flags);
+	spin_unlock(&state->state_lock);
+	kfree(lsp);
 }
 
-/*
- * Called with state->lock_sema and clp->cl_sem held.
- */
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
-	if (status == NFS_OK || seqid_mutating_err(-status))
-		lsp->ls_seqid++;
-}
+	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 
-/*
- * Check to see if the request lock (type FL_UNLK) effects the fl lock.
- *
- * fl and request must have the same posix owner
- *
- * return:
- * 0 -> fl not effected by request
- * 1 -> fl consumed by request
- */
+	dst->fl_u.nfs4_fl.owner = lsp;
+	atomic_inc(&lsp->ls_count);
+}
 
-static int
-nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
+static void nfs4_fl_release_lock(struct file_lock *fl)
 {
-	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
-		return 1;
-	return 0;
+	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
 }
 
-/*
- * Post an initialized lock_state on the state->lock_states list.
- */
-void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+static struct file_lock_operations nfs4_fl_lock_ops = {
+	.fl_copy_lock = nfs4_fl_copy_lock,
+	.fl_release_private = nfs4_fl_release_lock,
+};
+
+int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 {
-	if (!list_empty(&lsp->ls_locks))
-		return;
-	atomic_inc(&lsp->ls_count);
-	write_lock(&state->state_lock);
-	list_add(&lsp->ls_locks, &state->lock_states);
-	set_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	struct nfs4_lock_state *lsp;
+
+	if (fl->fl_ops != NULL)
+		return 0;
+	lsp = nfs4_get_lock_state(state, fl->fl_owner);
+	if (lsp == NULL)
+		return -ENOMEM;
+	fl->fl_u.nfs4_fl.owner = lsp;
+	fl->fl_ops = &nfs4_fl_lock_ops;
+	return 0;
 }
 
 /*
- * to decide to 'reap' lock state:
- * 1) search i_flock for file_locks with fl.lock_state = to ls.
- * 2) determine if unlock will consume found lock.
- *	if so, reap
- *
- *	else, don't reap.
- *
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
  */
-void
-nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
 {
-	struct inode *inode = state->inode;
-	struct file_lock *fl;
+	struct nfs4_lock_state *lsp;
 
-	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-		if (!(fl->fl_flags & FL_POSIX))
-			continue;
-		if (fl->fl_owner != lsp->ls_owner)
-			continue;
-		/* Exit if we find at least one lock which is not consumed */
-		if (nfs4_check_unlock(fl,request) == 0)
-			return;
-	}
+	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
+		return;
 
-	write_lock(&state->state_lock);
-	list_del_init(&lsp->ls_locks);
-	if (list_empty(&state->lock_states))
-		clear_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	spin_lock(&state->state_lock);
+	lsp = __nfs4_find_lock_state(state, fl_owner);
+	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
+	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
 }
 
 /*
- * Release reference to lock_state, and free it if we see that
- * it is no longer in use
+ * Called with state->lock_sema and clp->cl_sem held.
  */
-void
-nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
 {
-	if (!atomic_dec_and_test(&lsp->ls_count))
-		return;
-	BUG_ON (!list_empty(&lsp->ls_locks));
-	kfree(lsp);
+	if (status == NFS_OK || seqid_mutating_err(-status))
+		lsp->ls_seqid++;
 }
 
 /*
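Taken together, the new nfs4_get_lock_state()/nfs4_put_lock_state() pair implements a classic kernel idiom: optimistic allocation outside the lock with a re-check before insertion, and a release path that unlinks and frees under the same lock that guards lookups. A self-contained userspace sketch of the same shape, using a pthread mutex and an open-coded list in place of the kernel primitives; every name in it is illustrative rather than taken from the patch, and the mutex version takes the lock on every put, where atomic_dec_and_lock() avoids that for non-final references:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;	/* stands in for the ls_locks list linkage */
	long owner;		/* stands in for fl_owner_t */
	int refcount;		/* like ls_count; protected by table_lock here */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entries;	/* stands in for state->lock_states */

/* Lookup; caller must hold table_lock.  Takes a reference on success,
 * which is what this sketch assumes __nfs4_find_lock_state() does too. */
static struct entry *find_locked(long owner)
{
	struct entry *e;

	for (e = entries; e != NULL; e = e->next) {
		if (e->owner == owner) {
			e->refcount++;
			return e;
		}
	}
	return NULL;
}

/* Mirrors nfs4_get_lock_state(): search under the lock; if the entry is
 * missing, drop the lock, allocate, retake the lock and search again,
 * freeing the spare allocation if another thread raced in first. */
static struct entry *get_entry(long owner)
{
	struct entry *e, *new = NULL;

	for (;;) {
		pthread_mutex_lock(&table_lock);
		e = find_locked(owner);
		if (e != NULL)
			break;
		if (new != NULL) {
			/* Allocated on a previous pass and nobody raced
			 * in: publish the new entry while locked. */
			new->next = entries;
			entries = new;
			e = new;
			new = NULL;
			break;
		}
		pthread_mutex_unlock(&table_lock);
		new = calloc(1, sizeof(*new));
		if (new == NULL)
			return NULL;
		new->owner = owner;
		new->refcount = 1;	/* the caller's reference */
	}
	pthread_mutex_unlock(&table_lock);
	free(new);	/* NULL unless we lost the race; cf. kfree(new) */
	return e;
}

/* Mirrors nfs4_put_lock_state(): on the final put, unlink and free
 * while the lock is held, so lookups never see a dying entry. */
static void put_entry(struct entry *e)
{
	struct entry **p;

	if (e == NULL)
		return;
	pthread_mutex_lock(&table_lock);
	if (--e->refcount != 0) {
		pthread_mutex_unlock(&table_lock);
		return;
	}
	for (p = &entries; *p != NULL; p = &(*p)->next) {
		if (*p == e) {
			*p = e->next;
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	free(e);
}

The same ordering discipline the patch relies on appears here: an entry is only published while the lock is held, and it is only freed after being unlinked under that lock, so the find/insert/put paths can never observe a half-built or half-destroyed object.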