Diffstat (limited to 'fs/dlm/recoverd.c')
-rw-r--r--	fs/dlm/recoverd.c	| 44 ++++++++++++++++++++++++++++++++------------
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 362e3eff4dc9..650536aa5139 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -45,7 +45,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 	unsigned long start;
 	int error, neg = 0;
 
-	log_debug(ls, "recover %llx", rv->seq);
+	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
 
 	mutex_lock(&ls->ls_recoverd_active);
 
@@ -94,14 +94,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 	}
 
 	/*
-	 * Purge directory-related requests that are saved in requestqueue.
-	 * All dir requests from before recovery are invalid now due to the dir
-	 * rebuild and will be resent by the requesting nodes.
-	 */
-
-	dlm_purge_requestqueue(ls);
-
-	/*
 	 * Wait for all nodes to complete directory rebuild.
 	 */
 
@@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 		 */
 
 		dlm_recover_rsbs(ls);
+	} else {
+		/*
+		 * Other lockspace members may be going through the "neg" steps
+		 * while also adding us to the lockspace, in which case they'll
+		 * be doing the recover_locks (RS_LOCKS) barrier.
+		 */
+		dlm_set_recover_status(ls, DLM_RS_LOCKS);
+
+		error = dlm_recover_locks_wait(ls);
+		if (error) {
+			log_error(ls, "recover_locks_wait failed %d", error);
+			goto fail;
+		}
 	}
 
 	dlm_release_root_list(ls);
 
+	/*
+	 * Purge directory-related requests that are saved in requestqueue.
+	 * All dir requests from before recovery are invalid now due to the dir
+	 * rebuild and will be resent by the requesting nodes.
+	 */
+
+	dlm_purge_requestqueue(ls);
+
 	dlm_set_recover_status(ls, DLM_RS_DONE);
 	error = dlm_recover_done_wait(ls);
 	if (error) {
@@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 	dlm_astd_wake();
 
-	log_debug(ls, "recover %llx done: %u ms", rv->seq,
+	log_debug(ls, "recover %llx done: %u ms",
+		  (unsigned long long)rv->seq,
 		  jiffies_to_msecs(jiffies - start));
 	mutex_unlock(&ls->ls_recoverd_active);
 
@@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
  fail:
 	dlm_release_root_list(ls);
-	log_debug(ls, "recover %llx error %d", rv->seq, error);
+	log_debug(ls, "recover %llx error %d",
+		  (unsigned long long)rv->seq, error);
 	mutex_unlock(&ls->ls_recoverd_active);
 	return error;
 }
 
+/* The dlm_ls_start() that created the rv we take here may already have been
+   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
+   flag set. */
+
 static void do_ls_recovery(struct dlm_ls *ls)
 {
 	struct dlm_recover *rv = NULL;
@@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls)
 	spin_lock(&ls->ls_recover_lock);
 	rv = ls->ls_recover_args;
 	ls->ls_recover_args = NULL;
-	clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+	if (rv && ls->ls_recover_seq == rv->seq)
+		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
 	spin_unlock(&ls->ls_recover_lock);
 
 	if (rv) {
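
For context, the behavioral change in do_ls_recovery() is that LSFL_RECOVERY_STOP is now cleared only when the queued recovery arguments still match the current recovery sequence, so a dlm_ls_stop() that arrived after the dlm_ls_start() which queued rv leaves the lockspace stopped. A minimal sketch of the resulting function, reconstructed from the hunks above; the tail that runs and frees rv is not part of this diff and is shown here only as assumed, illustrative context:

/* Sketch only: reconstructed from the diff above, not a verbatim copy of
   the file.  The body after spin_unlock() is assumed for illustration. */
static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	/* Only clear the stop flag if this rv is still the current event;
	   a newer dlm_ls_stop() must keep LSFL_RECOVERY_STOP set. */
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);	/* recovery sequence patched above */
		kfree(rv);
	}
}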