author		Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 13:16:32 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2005-06-22 16:07:42 -0400
commit		8d0a8a9d0ec790086c64d210af413ac351d89e35 (patch)
tree		003a1481e4a8d8487956a6bf04db80dd93264b8b
parent		ecdbf769b2cb8903e07cd482334c714d89fd1146 (diff)
[PATCH] NFSv4: Clean up nfs4 lock state accounting
Ensure that lock owner structures are not released prematurely.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
-rw-r--r--	fs/nfs/nfs4_fs.h		|   9
-rw-r--r--	fs/nfs/nfs4proc.c		|  69
-rw-r--r--	fs/nfs/nfs4state.c		| 178
-rw-r--r--	include/linux/fs.h		|   1
-rw-r--r--	include/linux/nfs_fs_i.h	|   5
5 files changed, 118 insertions, 144 deletions
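
In rough outline, the patch below drops the separate find/get/put/notify lock-state calls in favour of a single nfs4_set_lock_state() that attaches a reference-counted nfs4_lock_state to the file_lock itself (fl_u.nfs4_fl.owner) and registers file_lock_operations so that copying a lock pins the owner and releasing a lock drops one reference; the per-open-state list moves from an rwlock to a spinlock. The fragment below is only a stand-alone userspace sketch of that ownership model, not kernel code: every identifier in it (lock_owner, lock_desc, owner_get, ...) is invented for illustration.

/*
 * Hedged, userspace-only sketch of the ownership model this patch moves to:
 * a refcounted per-owner object hangs off each lock descriptor, a "copy"
 * hook takes an extra reference and a "release" hook drops one, so the
 * owner object is freed exactly when the last lock referencing it goes
 * away.  None of these names are the kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct lock_owner {		/* stands in for struct nfs4_lock_state */
	int refcount;
	int id;
};

struct lock_desc {		/* stands in for struct file_lock */
	struct lock_owner *owner;
};

static struct lock_owner *owner_get(struct lock_owner *lo)
{
	lo->refcount++;
	return lo;
}

static void owner_put(struct lock_owner *lo)
{
	if (lo == NULL || --lo->refcount > 0)
		return;
	printf("freeing owner %d\n", lo->id);
	free(lo);
}

/* analogue of fl_copy_lock: a copied lock shares (and pins) the owner */
static void lock_copy(struct lock_desc *dst, const struct lock_desc *src)
{
	dst->owner = owner_get(src->owner);
}

/* analogue of fl_release_private: dropping a lock drops one reference */
static void lock_release(struct lock_desc *ld)
{
	owner_put(ld->owner);
	ld->owner = NULL;
}

int main(void)
{
	struct lock_owner *lo = calloc(1, sizeof(*lo));
	struct lock_desc a, b;

	if (lo == NULL)
		return 1;
	lo->refcount = 1;	/* reference owned by the first lock */
	lo->id = 42;
	a.owner = lo;
	lock_copy(&b, &a);	/* e.g. a lock split duplicates the descriptor */
	lock_release(&a);	/* owner survives: b still references it */
	lock_release(&b);	/* last reference gone, owner is freed */
	return 0;
}
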
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 7c6f1d668fbd..ec1a22d7b876 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -128,6 +128,7 @@ struct nfs4_state_owner {
 
 struct nfs4_lock_state {
 	struct list_head	ls_locks;	/* Other lock stateids */
+	struct nfs4_state *	ls_state;	/* Pointer to open state */
 	fl_owner_t		ls_owner;	/* POSIX lock owner */
 #define NFS_LOCK_INITIALIZED 1
 	int			ls_flags;
@@ -153,7 +154,7 @@ struct nfs4_state {
 
 	unsigned long		 flags;		/* Do we hold any locks? */
 	struct semaphore	 lock_sema;	/* Serializes file locking operations */
-	rwlock_t		 state_lock;	/* Protects the lock_states list */
+	spinlock_t		 state_lock;	/* Protects the lock_states list */
 
 	nfs4_stateid		 stateid;
 
@@ -225,12 +226,8 @@ extern void nfs4_close_state(struct nfs4_state *, mode_t);
 extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
 extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
 extern void nfs4_schedule_state_recovery(struct nfs4_client *);
-extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t);
-extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t);
-extern void nfs4_put_lock_state(struct nfs4_lock_state *state);
+extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
 extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
-extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
-extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
 extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
 
 extern const nfs4_stateid zero_stateid;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index af80b5981486..0ddc20102d46 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2626,14 +2626,11 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 	down_read(&clp->cl_sem);
 	nlo.clientid = clp->cl_clientid;
 	down(&state->lock_sema);
-	lsp = nfs4_find_lock_state(state, request->fl_owner);
-	if (lsp)
-		nlo.id = lsp->ls_id;
-	else {
-		spin_lock(&clp->cl_lock);
-		nlo.id = nfs4_alloc_lockowner_id(clp);
-		spin_unlock(&clp->cl_lock);
-	}
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
+		goto out;
+	lsp = request->fl_u.nfs4_fl.owner;
+	nlo.id = lsp->ls_id;
 	arg.u.lockt = &nlo;
 	status = rpc_call_sync(server->client, &msg, 0);
 	if (!status) {
@@ -2654,8 +2651,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
 		request->fl_pid = 0;
 		status = 0;
 	}
-	if (lsp)
-		nfs4_put_lock_state(lsp);
+out:
 	up(&state->lock_sema);
 	up_read(&clp->cl_sem);
 	return status;
@@ -2715,28 +2711,26 @@ static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock
 	};
 	struct nfs4_lock_state *lsp;
 	struct nfs_locku_opargs luargs;
-	int status = 0;
+	int status;
 
 	down_read(&clp->cl_sem);
 	down(&state->lock_sema);
-	lsp = nfs4_find_lock_state(state, request->fl_owner);
-	if (!lsp)
+	status = nfs4_set_lock_state(state, request);
+	if (status != 0)
 		goto out;
+	lsp = request->fl_u.nfs4_fl.owner;
 	/* We might have lost the locks! */
-	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
-		luargs.seqid = lsp->ls_seqid;
-		memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
-		arg.u.locku = &luargs;
-		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
-		nfs4_increment_lock_seqid(status, lsp);
-	}
+	if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0)
+		goto out;
+	luargs.seqid = lsp->ls_seqid;
+	memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
+	arg.u.locku = &luargs;
+	status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+	nfs4_increment_lock_seqid(status, lsp);
 
-	if (status == 0) {
+	if (status == 0)
 		memcpy(&lsp->ls_stateid, &res.u.stateid,
 				sizeof(lsp->ls_stateid));
-		nfs4_notify_unlck(state, request, lsp);
-	}
-	nfs4_put_lock_state(lsp);
 out:
 	up(&state->lock_sema);
 	if (status == 0)
@@ -2762,7 +2756,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
 {
 	struct inode *inode = state->inode;
 	struct nfs_server *server = NFS_SERVER(inode);
-	struct nfs4_lock_state *lsp;
+	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
 	struct nfs_lockargs arg = {
 		.fh = NFS_FH(inode),
 		.type = nfs4_lck_type(cmd, request),
@@ -2784,9 +2778,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
 	};
 	int status;
 
-	lsp = nfs4_get_lock_state(state, request->fl_owner);
-	if (lsp == NULL)
-		return -ENOMEM;
 	if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) {
 		struct nfs4_state_owner *owner = state->owner;
 		struct nfs_open_to_lock otl = {
@@ -2808,27 +2799,26 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r
 		 * seqid mutating errors */
 		nfs4_increment_seqid(status, owner);
 		up(&owner->so_sema);
+		if (status == 0) {
+			lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+			lsp->ls_seqid++;
+		}
 	} else {
 		struct nfs_exist_lock el = {
 			.seqid = lsp->ls_seqid,
 		};
 		memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid));
 		largs.u.exist_lock = &el;
-		largs.new_lock_owner = 0;
 		arg.u.lock = &largs;
 		status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR);
+		/* increment seqid on success, and * seqid mutating errors*/
+		nfs4_increment_lock_seqid(status, lsp);
 	}
-	/* increment seqid on success, and * seqid mutating errors*/
-	nfs4_increment_lock_seqid(status, lsp);
 	/* save the returned stateid. */
-	if (status == 0) {
+	if (status == 0)
 		memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
-		lsp->ls_flags |= NFS_LOCK_INITIALIZED;
-		if (!reclaim)
-			nfs4_notify_setlk(state, request, lsp);
-	} else if (status == -NFS4ERR_DENIED)
+	else if (status == -NFS4ERR_DENIED)
 		status = -EAGAIN;
-	nfs4_put_lock_state(lsp);
 	return status;
 }
 
@@ -2869,7 +2859,9 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
 
 	down_read(&clp->cl_sem);
 	down(&state->lock_sema);
-	status = _nfs4_do_setlk(state, cmd, request, 0);
+	status = nfs4_set_lock_state(state, request);
+	if (status == 0)
+		status = _nfs4_do_setlk(state, cmd, request, 0);
 	up(&state->lock_sema);
 	if (status == 0) {
 		/* Note: we always want to sleep here! */
@@ -2927,7 +2919,6 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
 		if (signalled())
 			break;
 	} while(status < 0);
-
 	return status;
 }
 
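
The callers above now set up the lock owner once per file_lock via nfs4_set_lock_state() and then simply read request->fl_u.nfs4_fl.owner. The lookup-or-create behind that call, nfs4_get_lock_state() in the fs/nfs/nfs4state.c hunk further below, follows the familiar pattern of searching under the lock, allocating with the lock dropped, and re-checking before insertion. The following is only a hedged, self-contained userspace sketch of that pattern: the pthread mutex stands in for the kernel spinlock and every identifier is illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct owner {
	struct owner *next;
	long key;		/* plays the role of fl_owner_t */
	int refcount;
};

static struct owner *owners;	/* plays the role of state->lock_states */
static pthread_mutex_t owners_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold owners_lock; takes a reference on a hit */
static struct owner *find_owner_locked(long key)
{
	struct owner *o;

	for (o = owners; o != NULL; o = o->next) {
		if (o->key == key) {
			o->refcount++;
			return o;
		}
	}
	return NULL;
}

static struct owner *get_owner(long key)
{
	struct owner *o, *new = NULL;

	for (;;) {
		pthread_mutex_lock(&owners_lock);
		o = find_owner_locked(key);
		if (o != NULL)
			break;
		if (new != NULL) {	/* still absent: publish our copy */
			new->next = owners;
			owners = new;
			o = new;
			new = NULL;
			break;
		}
		pthread_mutex_unlock(&owners_lock);
		new = calloc(1, sizeof(*new));	/* allocate with no lock held */
		if (new == NULL)
			return NULL;
		new->key = key;
		new->refcount = 1;
	}
	pthread_mutex_unlock(&owners_lock);
	free(new);	/* NULL, or the copy that lost the race */
	return o;
}

int main(void)
{
	struct owner *a = get_owner(1), *b = get_owner(1);

	printf("same owner: %s, refcount %d\n",
	       a == b ? "yes" : "no", a ? a->refcount : 0);
	return 0;
}

A loser of the race simply frees its unused allocation after dropping the lock, which is also what the kernel version below does with kfree(new).
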
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 591ad1d51880..afe587d82f1e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -360,7 +360,7 @@ nfs4_alloc_open_state(void)
 	atomic_set(&state->count, 1);
 	INIT_LIST_HEAD(&state->lock_states);
 	init_MUTEX(&state->lock_sema);
-	rwlock_init(&state->state_lock);
+	spin_lock_init(&state->state_lock);
 	return state;
 }
 
@@ -542,16 +542,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
 	return NULL;
 }
 
-struct nfs4_lock_state *
-nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
-{
-	struct nfs4_lock_state *lsp;
-	read_lock(&state->state_lock);
-	lsp = __nfs4_find_lock_state(state, fl_owner);
-	read_unlock(&state->state_lock);
-	return lsp;
-}
-
 /*
  * Return a compatible lock_state. If no initialized lock_state structure
  * exists, return an uninitialized one.
@@ -568,14 +558,13 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
 		return NULL;
 	lsp->ls_flags = 0;
 	lsp->ls_seqid = 0;	/* arbitrary */
-	lsp->ls_id = -1;
 	memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
 	atomic_set(&lsp->ls_count, 1);
 	lsp->ls_owner = fl_owner;
-	INIT_LIST_HEAD(&lsp->ls_locks);
 	spin_lock(&clp->cl_lock);
 	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
 	spin_unlock(&clp->cl_lock);
+	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
 }
 
@@ -585,121 +574,112 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
  *
  * The caller must be holding state->lock_sema and clp->cl_sem
  */
-struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
 {
-	struct nfs4_lock_state * lsp;
+	struct nfs4_lock_state *lsp, *new = NULL;
 
-	lsp = nfs4_find_lock_state(state, owner);
-	if (lsp == NULL)
-		lsp = nfs4_alloc_lock_state(state, owner);
+	for(;;) {
+		spin_lock(&state->state_lock);
+		lsp = __nfs4_find_lock_state(state, owner);
+		if (lsp != NULL)
+			break;
+		if (new != NULL) {
+			new->ls_state = state;
+			list_add(&new->ls_locks, &state->lock_states);
+			set_bit(LK_STATE_IN_USE, &state->flags);
+			lsp = new;
+			new = NULL;
+			break;
+		}
+		spin_unlock(&state->state_lock);
+		new = nfs4_alloc_lock_state(state, owner);
+		if (new == NULL)
+			return NULL;
+	}
+	spin_unlock(&state->state_lock);
+	kfree(new);
 	return lsp;
 }
 
 /*
- * Byte-range lock aware utility to initialize the stateid of read/write
- * requests.
+ * Release reference to lock_state, and free it if we see that
+ * it is no longer in use
  */
-void
-nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
+static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
 {
-	if (test_bit(LK_STATE_IN_USE, &state->flags)) {
-		struct nfs4_lock_state *lsp;
+	struct nfs4_state *state;
 
-		lsp = nfs4_find_lock_state(state, fl_owner);
-		if (lsp) {
-			memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
-			nfs4_put_lock_state(lsp);
-			return;
-		}
-	}
-	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (lsp == NULL)
+		return;
+	state = lsp->ls_state;
+	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+		return;
+	list_del(&lsp->ls_locks);
+	if (list_empty(&state->lock_states))
+		clear_bit(LK_STATE_IN_USE, &state->flags);
+	spin_unlock(&state->state_lock);
+	kfree(lsp);
 }
 
-/*
-* Called with state->lock_sema and clp->cl_sem held.
-*/
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
-	if (status == NFS_OK || seqid_mutating_err(-status))
-		lsp->ls_seqid++;
-}
+	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
 
-/*
-* Check to see if the request lock (type FL_UNLK) effects the fl lock.
-*
-* fl and request must have the same posix owner
-*
-* return:
-* 0 -> fl not effected by request
-* 1 -> fl consumed by request
-*/
+	dst->fl_u.nfs4_fl.owner = lsp;
+	atomic_inc(&lsp->ls_count);
+}
 
-static int
-nfs4_check_unlock(struct file_lock *fl, struct file_lock *request)
+static void nfs4_fl_release_lock(struct file_lock *fl)
 {
-	if (fl->fl_start >= request->fl_start && fl->fl_end <= request->fl_end)
-		return 1;
-	return 0;
+	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
 }
 
-/*
- * Post an initialized lock_state on the state->lock_states list.
- */
-void nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+static struct file_lock_operations nfs4_fl_lock_ops = {
+	.fl_copy_lock = nfs4_fl_copy_lock,
+	.fl_release_private = nfs4_fl_release_lock,
+};
+
+int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
 {
-	if (!list_empty(&lsp->ls_locks))
-		return;
-	atomic_inc(&lsp->ls_count);
-	write_lock(&state->state_lock);
-	list_add(&lsp->ls_locks, &state->lock_states);
-	set_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	struct nfs4_lock_state *lsp;
+
+	if (fl->fl_ops != NULL)
+		return 0;
+	lsp = nfs4_get_lock_state(state, fl->fl_owner);
+	if (lsp == NULL)
+		return -ENOMEM;
+	fl->fl_u.nfs4_fl.owner = lsp;
+	fl->fl_ops = &nfs4_fl_lock_ops;
+	return 0;
 }
 
 /*
- * to decide to 'reap' lock state:
- * 1) search i_flock for file_locks with fl.lock_state = to ls.
- * 2) determine if unlock will consume found lock.
- *	if so, reap
- *
- *	else, don't reap.
- *
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
  */
-void
-nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
+void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
 {
-	struct inode *inode = state->inode;
-	struct file_lock *fl;
+	struct nfs4_lock_state *lsp;
 
-	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-		if (!(fl->fl_flags & FL_POSIX))
-			continue;
-		if (fl->fl_owner != lsp->ls_owner)
-			continue;
-		/* Exit if we find at least one lock which is not consumed */
-		if (nfs4_check_unlock(fl,request) == 0)
-			return;
-	}
+	memcpy(dst, &state->stateid, sizeof(*dst));
+	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
+		return;
 
-	write_lock(&state->state_lock);
-	list_del_init(&lsp->ls_locks);
-	if (list_empty(&state->lock_states))
-		clear_bit(LK_STATE_IN_USE, &state->flags);
-	write_unlock(&state->state_lock);
+	spin_lock(&state->state_lock);
+	lsp = __nfs4_find_lock_state(state, fl_owner);
+	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
+	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
 }
 
 /*
- * Release reference to lock_state, and free it if we see that
- * it is no longer in use
- */
-void
-nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+* Called with state->lock_sema and clp->cl_sem held.
+*/
+void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
 {
-	if (!atomic_dec_and_test(&lsp->ls_count))
-		return;
-	BUG_ON (!list_empty(&lsp->ls_locks));
-	kfree(lsp);
+	if (status == NFS_OK || seqid_mutating_err(-status))
+		lsp->ls_seqid++;
 }
 
 /*
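
nfs4_put_lock_state() above combines the final reference drop with taking state->state_lock via atomic_dec_and_lock(), so unlinking the lock state from lock_states cannot race with a concurrent lookup. Below is a hedged userspace emulation of that idiom using C11 atomics and a pthread mutex; dec_and_lock(), struct owner and the list layout are invented for the sketch and are not the kernel primitives.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct owner {
	struct owner *next;
	atomic_int refcount;
};

static struct owner *owners;	/* plays the role of state->lock_states */
static pthread_mutex_t owners_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Rough analogue of atomic_dec_and_lock(): returns 1 with the mutex held
 * when the decrement hit zero, otherwise 0 with the mutex not held.
 */
static int dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	while (old > 1) {	/* fast path: clearly not the last reference */
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return 0;
	}
	pthread_mutex_lock(lock);	/* might be the last one: lock first */
	if (atomic_fetch_sub(cnt, 1) == 1)
		return 1;		/* caller now holds the lock */
	pthread_mutex_unlock(lock);
	return 0;
}

static void put_owner(struct owner *o)
{
	struct owner **p;

	if (o == NULL)
		return;
	if (!dec_and_lock(&o->refcount, &owners_lock))
		return;
	/* last reference: unlink while still holding the list lock */
	for (p = &owners; *p != NULL; p = &(*p)->next) {
		if (*p == o) {
			*p = o->next;
			break;
		}
	}
	pthread_mutex_unlock(&owners_lock);
	free(o);
}

int main(void)
{
	struct owner *o = calloc(1, sizeof(*o));

	if (o == NULL)
		return 1;
	atomic_init(&o->refcount, 2);	/* two lock descriptors reference it */
	o->next = owners;
	owners = o;
	put_owner(o);	/* list and allocation untouched */
	put_owner(o);	/* unlinked and freed under owners_lock */
	return 0;
}
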
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9b8b696d4f15..e5a8db00df29 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -674,6 +674,7 @@ struct file_lock {
 	struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
 	union {
 		struct nfs_lock_info	nfs_fl;
+		struct nfs4_lock_info	nfs4_fl;
 	} fl_u;
 };
 
diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h
index e9a749588a7b..e2c18dabff86 100644
--- a/include/linux/nfs_fs_i.h
+++ b/include/linux/nfs_fs_i.h
@@ -16,6 +16,11 @@ struct nfs_lock_info {
 	struct nlm_lockowner *owner;
 };
 
+struct nfs4_lock_state;
+struct nfs4_lock_info {
+	struct nfs4_lock_state *owner;
+};
+
 /*
  * Lock flag values
  */