aboutsummaryrefslogtreecommitdiffstats
path: root/fs/dlm
diff options
context:
space:
mode:
author    David Teigland <teigland@redhat.com>  2007-01-15 11:34:52 -0500
committer Steven Whitehouse <swhiteho@redhat.com>  2007-02-05 13:36:55 -0500
commit    a1bc86e6bddd34362ca08a3a4d898eb4b5c15215 (patch)
tree      92b30f8f9400c5a1b71a2e3a17397b9d0b2cc2f2 /fs/dlm
parent    1d6e8131cf0064ef5ab5f3411a82b800afbfadee (diff)
[DLM] fix user unlocking
When a user process exits, we clear all the locks it holds. There is a problem, though, with locks that the process had begun unlocking before it exited. We couldn't find the lkb's that were in the process of being unlocked remotely, to flag that they are DEAD. To solve this, we move lkb's being unlocked onto a new list in the per-process structure that tracks what locks the process is holding. We can then go through this list to flag the necessary lkb's when clearing locks for a process when it exits. Signed-off-by: David Teigland <teigland@redhat.com> Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/dlm_internal.h |  1
-rw-r--r--  fs/dlm/lock.c         | 30
-rw-r--r--  fs/dlm/user.c         |  9
3 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index ee993c5c2307..61d93201e1b2 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -526,6 +526,7 @@ struct dlm_user_proc {
526 spinlock_t asts_spin; 526 spinlock_t asts_spin;
527 struct list_head locks; 527 struct list_head locks;
528 spinlock_t locks_spin; 528 spinlock_t locks_spin;
529 struct list_head unlocking;
529 wait_queue_head_t wait; 530 wait_queue_head_t wait;
530}; 531};
531 532
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 5bac9827ded3..6ad2b8eb96a5 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -3772,12 +3772,10 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3772 goto out_put; 3772 goto out_put;
3773 3773
3774 spin_lock(&ua->proc->locks_spin); 3774 spin_lock(&ua->proc->locks_spin);
3775 list_del_init(&lkb->lkb_ownqueue); 3775 /* dlm_user_add_ast() may have already taken lkb off the proc list */
3776 if (!list_empty(&lkb->lkb_ownqueue))
3777 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3776 spin_unlock(&ua->proc->locks_spin); 3778 spin_unlock(&ua->proc->locks_spin);
3777
3778 /* this removes the reference for the proc->locks list added by
3779 dlm_user_request */
3780 unhold_lkb(lkb);
3781 out_put: 3779 out_put:
3782 dlm_put_lkb(lkb); 3780 dlm_put_lkb(lkb);
3783 out: 3781 out:
@@ -3817,9 +3815,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3817 /* this lkb was removed from the WAITING queue */ 3815 /* this lkb was removed from the WAITING queue */
3818 if (lkb->lkb_grmode == DLM_LOCK_IV) { 3816 if (lkb->lkb_grmode == DLM_LOCK_IV) {
3819 spin_lock(&ua->proc->locks_spin); 3817 spin_lock(&ua->proc->locks_spin);
3820 list_del_init(&lkb->lkb_ownqueue); 3818 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
3821 spin_unlock(&ua->proc->locks_spin); 3819 spin_unlock(&ua->proc->locks_spin);
3822 unhold_lkb(lkb);
3823 } 3820 }
3824 out_put: 3821 out_put:
3825 dlm_put_lkb(lkb); 3822 dlm_put_lkb(lkb);
@@ -3880,11 +3877,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3880 mutex_lock(&ls->ls_clear_proc_locks); 3877 mutex_lock(&ls->ls_clear_proc_locks);
3881 3878
3882 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) { 3879 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
3883 if (lkb->lkb_ast_type) {
3884 list_del(&lkb->lkb_astqueue);
3885 unhold_lkb(lkb);
3886 }
3887
3888 list_del_init(&lkb->lkb_ownqueue); 3880 list_del_init(&lkb->lkb_ownqueue);
3889 3881
3890 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) { 3882 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
@@ -3901,6 +3893,20 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3901 3893
3902 dlm_put_lkb(lkb); 3894 dlm_put_lkb(lkb);
3903 } 3895 }
3896
3897 /* in-progress unlocks */
3898 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
3899 list_del_init(&lkb->lkb_ownqueue);
3900 lkb->lkb_flags |= DLM_IFL_DEAD;
3901 dlm_put_lkb(lkb);
3902 }
3903
3904 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
3905 list_del(&lkb->lkb_astqueue);
3906 dlm_put_lkb(lkb);
3907 }
3908
3904 mutex_unlock(&ls->ls_clear_proc_locks); 3909 mutex_unlock(&ls->ls_clear_proc_locks);
3905 unlock_recovery(ls); 3910 unlock_recovery(ls);
3906} 3911}
3912
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index c37e93e4f2df..d378b7fe2a1e 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -180,6 +180,14 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue)) 180 ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
181 remove_ownqueue = 1; 181 remove_ownqueue = 1;
182 182
183 /* unlocks or cancels of waiting requests need to be removed from the
184 proc's unlocking list, again there must be a better way... */
185
186 if (ua->lksb.sb_status == -DLM_EUNLOCK ||
187 (ua->lksb.sb_status == -DLM_ECANCEL &&
188 lkb->lkb_grmode == DLM_LOCK_IV))
189 remove_ownqueue = 1;
190
183 /* We want to copy the lvb to userspace when the completion 191 /* We want to copy the lvb to userspace when the completion
184 ast is read if the status is 0, the lock has an lvb and 192 ast is read if the status is 0, the lock has an lvb and
185 lvb_ops says we should. We could probably have set_lvb_lock() 193 lvb_ops says we should. We could probably have set_lvb_lock()
@@ -523,6 +531,7 @@ static int device_open(struct inode *inode, struct file *file)
523 proc->lockspace = ls->ls_local_handle; 531 proc->lockspace = ls->ls_local_handle;
524 INIT_LIST_HEAD(&proc->asts); 532 INIT_LIST_HEAD(&proc->asts);
525 INIT_LIST_HEAD(&proc->locks); 533 INIT_LIST_HEAD(&proc->locks);
534 INIT_LIST_HEAD(&proc->unlocking);
526 spin_lock_init(&proc->asts_spin); 535 spin_lock_init(&proc->asts_spin);
527 spin_lock_init(&proc->locks_spin); 536 spin_lock_init(&proc->locks_spin);
528 init_waitqueue_head(&proc->wait); 537 init_waitqueue_head(&proc->wait);