about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h  | 14
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c  |  6
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 78eccd0951e4..829cc3948804 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -235,18 +235,26 @@ struct dlm_lock_resource
 	struct qstr lockname;
 	struct kref refs;
 
-	/* please keep these next 3 in this order
-	 * some funcs want to iterate over all lists */
+	/*
+	 * Please keep granted, converting, and blocked in this order,
+	 * as some funcs want to iterate over all lists.
+	 *
+	 * All four lists are protected by the hash's reference.
+	 */
 	struct list_head granted;
 	struct list_head converting;
 	struct list_head blocked;
+	struct list_head purge;
 
+	/*
+	 * These two lists require you to hold an additional reference
+	 * while they are on the list.
+	 */
 	struct list_head dirty;
 	struct list_head recovering; // dlm_recovery_ctxt.resources list
 
 	/* unused lock resources have their last_used stamped and are
 	 * put on a list for the dlm thread to run. */
-	struct list_head purge;
 	unsigned long last_used;
 
 	unsigned migration_pending:1;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 76526ea95bb2..610dc76a851b 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -455,6 +455,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	/* don't shuffle secondary queues */
 	if ((res->owner == dlm->node_num) &&
 	    !(res->state & DLM_LOCK_RES_DIRTY)) {
+		/* ref for dirty_list */
+		dlm_lockres_get(res);
 		list_add_tail(&res->dirty, &dlm->dirty_list);
 		res->state |= DLM_LOCK_RES_DIRTY;
 	}
@@ -639,6 +641,8 @@ static int dlm_thread(void *data)
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
+			/* Drop dirty_list ref */
+			dlm_lockres_put(res);
 
 			/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
@@ -691,6 +695,8 @@ in_progress:
 			/* if the lock was in-progress, stick
 			 * it on the back of the list */
 			if (delay) {
+				/* ref for dirty_list */
+				dlm_lockres_get(res);
 				spin_lock(&res->spinlock);
 				list_add_tail(&res->dirty, &dlm->dirty_list);
 				res->state |= DLM_LOCK_RES_DIRTY;