Diffstat (limited to 'fs/ocfs2/dlm/dlmthread.c')
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 68
1 file changed, 62 insertions(+), 6 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 44d3b57ae8a8..0c822f3ffb05 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -39,6 +39,7 @@
 #include <linux/inet.h>
 #include <linux/timer.h>
 #include <linux/kthread.h>
+#include <linux/delay.h>
 
 
 #include "cluster/heartbeat.h"
@@ -53,6 +54,8 @@
 #include "cluster/masklog.h"
 
 static int dlm_thread(void *data);
+static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *lockres);
 
 static void dlm_flush_asts(struct dlm_ctxt *dlm);
 
@@ -80,7 +83,7 @@ repeat:
 }
 
 
-static int __dlm_lockres_unused(struct dlm_lock_resource *res)
+int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
 	if (list_empty(&res->granted) &&
 	    list_empty(&res->converting) &&
@@ -103,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 	assert_spin_locked(&res->spinlock);
 
 	if (__dlm_lockres_unused(res)){
+		/* For now, just keep any resource we master */
+		if (res->owner == dlm->node_num)
+		{
+			if (!list_empty(&res->purge)) {
+				mlog(0, "we master %s:%.*s, but it is on "
+				     "the purge list. Removing\n",
+				     dlm->name, res->lockname.len,
+				     res->lockname.name);
+				list_del_init(&res->purge);
+				dlm->purge_count--;
+			}
+			return;
+		}
+
 		if (list_empty(&res->purge)) {
 			mlog(0, "putting lockres %.*s from purge list\n",
 			     res->lockname.len, res->lockname.name);
@@ -110,10 +127,23 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			res->last_used = jiffies;
 			list_add_tail(&res->purge, &dlm->purge_list);
 			dlm->purge_count++;
+
+			/* if this node is not the owner, there is
+			 * no way to keep track of who the owner could be.
+			 * unhash it to avoid serious problems. */
+			if (res->owner != dlm->node_num) {
+				mlog(0, "%s:%.*s: doing immediate "
+				     "purge of lockres owned by %u\n",
+				     dlm->name, res->lockname.len,
+				     res->lockname.name, res->owner);
+
+				dlm_purge_lockres_now(dlm, res);
+			}
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s from purge list\n",
-		     res->lockname.len, res->lockname.name);
+		mlog(0, "removing lockres %.*s from purge list, "
+		     "owner=%u\n", res->lockname.len, res->lockname.name,
+		     res->owner);
 
 		list_del_init(&res->purge);
 		dlm->purge_count--;
@@ -165,6 +195,7 @@ again:
 	} else if (ret < 0) {
 		mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n",
 		     lockres->lockname.len, lockres->lockname.name);
+		msleep(100);
 		goto again;
 	}
 
@@ -178,6 +209,24 @@ finish:
 	__dlm_unhash_lockres(lockres);
 }
 
+/* make an unused lockres go away immediately.
+ * as soon as the dlm spinlock is dropped, this lockres
+ * will not be found. kfree still happens on last put. */
+static void dlm_purge_lockres_now(struct dlm_ctxt *dlm,
+				  struct dlm_lock_resource *lockres)
+{
+	assert_spin_locked(&dlm->spinlock);
+	assert_spin_locked(&lockres->spinlock);
+
+	BUG_ON(!__dlm_lockres_unused(lockres));
+
+	if (!list_empty(&lockres->purge)) {
+		list_del_init(&lockres->purge);
+		dlm->purge_count--;
+	}
+	__dlm_unhash_lockres(lockres);
+}
+
 static void dlm_run_purge_list(struct dlm_ctxt *dlm,
 			       int purge_now)
 {
@@ -420,6 +469,8 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	/* don't shuffle secondary queues */
 	if ((res->owner == dlm->node_num) &&
 	    !(res->state & DLM_LOCK_RES_DIRTY)) {
+		/* ref for dirty_list */
+		dlm_lockres_get(res);
 		list_add_tail(&res->dirty, &dlm->dirty_list);
 		res->state |= DLM_LOCK_RES_DIRTY;
 	}
@@ -604,6 +655,8 @@ static int dlm_thread(void *data)
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
+			/* Drop dirty_list ref */
+			dlm_lockres_put(res);
 
 			/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
@@ -640,8 +693,9 @@ static int dlm_thread(void *data)
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
 
-			mlog(0, "calling dlm_shuffle_lists with dlm=%p, "
-			     "res=%p\n", dlm, res);
+			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
+			     "res=%.*s\n", dlm->name,
+			     res->lockname.len, res->lockname.name);
 
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
@@ -655,6 +709,8 @@ in_progress:
 			/* if the lock was in-progress, stick
 			 * it on the back of the list */
 			if (delay) {
+				/* ref for dirty_list */
+				dlm_lockres_get(res);
 				spin_lock(&res->spinlock);
 				list_add_tail(&res->dirty, &dlm->dirty_list);
 				res->state |= DLM_LOCK_RES_DIRTY;
@@ -675,7 +731,7 @@ in_progress:
 
 		/* yield and continue right away if there is more work to do */
 		if (!n) {
-			yield();
+			cond_resched();
 			continue;
 		}
 
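
The dlm_lockres_get()/dlm_lockres_put() pairs added above follow a common kernel idiom: list membership holds its own reference, so a lockres cannot reach its final put (and be freed) while it still sits on dlm->dirty_list. Below is a minimal userspace sketch of that idiom, assuming a simple non-concurrent setting; the names (obj_get, obj_put, mark_dirty, process_dirty) are illustrative stand-ins, not the ocfs2/dlm API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;		/* creator holds the initial reference */
	struct obj *next;	/* "dirty list" link (LIFO for brevity) */
	const char *name;
};

static struct obj *dirty_list;

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("freeing %s\n", o->name);
		free(o);
	}
}

/* take a reference before linking, mirroring dlm_lockres_get()
 * before list_add_tail(&res->dirty, &dlm->dirty_list) */
static void mark_dirty(struct obj *o)
{
	obj_get(o);
	o->next = dirty_list;
	dirty_list = o;
}

/* drop the list's reference after unlinking, mirroring
 * dlm_lockres_put() after list_del_init(&res->dirty) */
static void process_dirty(void)
{
	while (dirty_list) {
		struct obj *o = dirty_list;

		dirty_list = o->next;
		printf("processing %s\n", o->name);
		obj_put(o);	/* drop dirty-list ref */
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	o->name = "lockres";
	mark_dirty(o);
	obj_put(o);		/* creator's put: list ref keeps it alive */
	process_dirty();	/* last ref dropped here -> freed */
	return 0;
}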