author    Sunil Mushran <sunil.mushran@oracle.com>    2009-02-26 18:00:39 -0500
committer Mark Fasheh <mfasheh@suse.com>              2009-04-03 14:39:18 -0400
commit    c2cd4a44333034203cb198915e2b75c3227d41bf (patch)
tree      69e6bf95830daec0db0cd3219c55edfa87fc4e64 /fs/ocfs2/dlm/dlmmaster.c
parent    f77a9a78c3a1d995b3bf948dbcad5c4a1b2302d5 (diff)
ocfs2/dlm: Refactor dlm_clean_master_list()
This patch refactors dlm_clean_master_list() so as to make it
easier to convert the mle list to a hash.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
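
In outline, the patched function reduces to the loop below. This is a
simplified sketch, not the verbatim kernel code: locking and the unchanged
mle-ownership checks are elided, and only the calls into the three new
helpers are shown. It makes the motivation visible: once the per-type
handling lives in helpers, converting the backing list to a hash only
touches this one iteration.

/* Simplified sketch of dlm_clean_master_list() after this patch.
 * Locking and the unchanged ownership checks are elided. */
void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle, *next;
	struct dlm_lock_resource *res;

top:
	list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
		if (mle->type == DLM_MLE_BLOCK) {
			/* dead node may have been the would-be master */
			dlm_clean_block_mle(dlm, mle, dead_node);
			continue;
		}

		/* ... unchanged checks that skip unaffected mles ... */

		/* migration mle: unlink it and wake any waiters */
		dlm_clean_migration_mle(dlm, mle);

		/* rare case: the helper had to drop dlm->master_lock,
		 * so the list cursor is stale; rescan from the head */
		res = dlm_reset_mleres_owner(dlm, mle);
		if (res)
			goto top;

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
}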
Diffstat (limited to 'fs/ocfs2/dlm/dlmmaster.c')
 -rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 148
 1 file changed, 85 insertions(+), 63 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 040581e1cd04..ec6da3c37dc8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3207,12 +3207,87 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
 	return ret;
 }
 
+/*
+ * Sets the owner of the lockres, associated to the mle, to UNKNOWN
+ */
+static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm,
+					struct dlm_master_list_entry *mle)
+{
+	struct dlm_lock_resource *res;
+	unsigned int hash;
+
+	/* Find the lockres associated to the mle and set its owner to UNK */
+	hash = dlm_lockid_hash(mle->u.mlename.name, mle->u.mlename.len);
+	res = __dlm_lookup_lockres(dlm, mle->u.mlename.name, mle->u.mlename.len,
+				   hash);
+	if (res) {
+		spin_unlock(&dlm->master_lock);
+
+		/* move lockres onto recovery list */
+		spin_lock(&res->spinlock);
+		dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
+		dlm_move_lockres_to_recovery_list(dlm, res);
+		spin_unlock(&res->spinlock);
+		dlm_lockres_put(res);
+
+		/* about to get rid of mle, detach from heartbeat */
+		__dlm_mle_detach_hb_events(dlm, mle);
+
+		/* dump the mle */
+		spin_lock(&dlm->master_lock);
+		__dlm_put_mle(mle);
+		spin_unlock(&dlm->master_lock);
+	}
+
+	return res;
+}
+
+static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
+				    struct dlm_master_list_entry *mle)
+{
+	__dlm_mle_detach_hb_events(dlm, mle);
+
+	spin_lock(&mle->spinlock);
+	__dlm_unlink_mle(dlm, mle);
+	atomic_set(&mle->woken, 1);
+	spin_unlock(&mle->spinlock);
+
+	wake_up(&mle->wq);
+}
+
+static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
+				struct dlm_master_list_entry *mle, u8 dead_node)
+{
+	int bit;
+
+	BUG_ON(mle->type != DLM_MLE_BLOCK);
+
+	spin_lock(&mle->spinlock);
+	bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+	if (bit != dead_node) {
+		mlog(0, "mle found, but dead node %u would not have been "
+		     "master\n", dead_node);
+		spin_unlock(&mle->spinlock);
+	} else {
+		/* Must drop the refcount by one since the assert_master will
+		 * never arrive. This may result in the mle being unlinked and
+		 * freed, but there may still be a process waiting in the
+		 * dlmlock path which is fine. */
+		mlog(0, "node %u was expected master\n", dead_node);
+		atomic_set(&mle->woken, 1);
+		spin_unlock(&mle->spinlock);
+		wake_up(&mle->wq);
+
+		/* Do not need events any longer, so detach from heartbeat */
+		__dlm_mle_detach_hb_events(dlm, mle);
+		__dlm_put_mle(mle);
+	}
+}
 
 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
 {
 	struct dlm_master_list_entry *mle, *next;
 	struct dlm_lock_resource *res;
-	unsigned int hash;
 
 	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
 top:
@@ -3236,30 +3311,7 @@ top:
 		 * need to clean up if the dead node would have
 		 * been the master. */
 		if (mle->type == DLM_MLE_BLOCK) {
-			int bit;
-
-			spin_lock(&mle->spinlock);
-			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
-			if (bit != dead_node) {
-				mlog(0, "mle found, but dead node %u would "
-				     "not have been master\n", dead_node);
-				spin_unlock(&mle->spinlock);
-			} else {
-				/* must drop the refcount by one since the
-				 * assert_master will never arrive. this
-				 * may result in the mle being unlinked and
-				 * freed, but there may still be a process
-				 * waiting in the dlmlock path which is fine. */
-				mlog(0, "node %u was expected master\n",
-				     dead_node);
-				atomic_set(&mle->woken, 1);
-				spin_unlock(&mle->spinlock);
-				wake_up(&mle->wq);
-				/* do not need events any longer, so detach
-				 * from heartbeat */
-				__dlm_mle_detach_hb_events(dlm, mle);
-				__dlm_put_mle(mle);
-			}
+			dlm_clean_block_mle(dlm, mle, dead_node);
 			continue;
 		}
 
@@ -3280,51 +3332,21 @@ top:
 
 		/* if we have reached this point, this mle needs to
 		 * be removed from the list and freed. */
-
-		/* remove from the list early. NOTE: unlinking
-		 * list_head while in list_for_each_safe */
-		__dlm_mle_detach_hb_events(dlm, mle);
-		spin_lock(&mle->spinlock);
-		__dlm_unlink_mle(dlm, mle);
-		atomic_set(&mle->woken, 1);
-		spin_unlock(&mle->spinlock);
-		wake_up(&mle->wq);
+		dlm_clean_migration_mle(dlm, mle);
 
 		mlog(0, "%s: node %u died during migration from "
 		     "%u to %u!\n", dlm->name, dead_node,
 		     mle->master, mle->new_master);
-		/* if there is a lockres associated with this
-		 * mle, find it and set its owner to UNKNOWN */
-		hash = dlm_lockid_hash(mle->u.mlename.name, mle->u.mlename.len);
-		res = __dlm_lookup_lockres(dlm, mle->u.mlename.name,
-					   mle->u.mlename.len, hash);
-		if (res) {
-			/* unfortunately if we hit this rare case, our
-			 * lock ordering is messed. we need to drop
-			 * the master lock so that we can take the
-			 * lockres lock, meaning that we will have to
-			 * restart from the head of list. */
-			spin_unlock(&dlm->master_lock);
-
-			/* move lockres onto recovery list */
-			spin_lock(&res->spinlock);
-			dlm_set_lockres_owner(dlm, res,
-					      DLM_LOCK_RES_OWNER_UNKNOWN);
-			dlm_move_lockres_to_recovery_list(dlm, res);
-			spin_unlock(&res->spinlock);
-			dlm_lockres_put(res);
-
-			/* about to get rid of mle, detach from heartbeat */
-			__dlm_mle_detach_hb_events(dlm, mle);
-
-			/* dump the mle */
-			spin_lock(&dlm->master_lock);
-			__dlm_put_mle(mle);
-			spin_unlock(&dlm->master_lock);
 
+		/* If we find a lockres associated with the mle, we've
+		 * hit this rare case that messes up our lock ordering.
+		 * If so, we need to drop the master lock so that we can
+		 * take the lockres lock, meaning that we will have to
+		 * restart from the head of list. */
+		res = dlm_reset_mleres_owner(dlm, mle);
+		if (res)
 			/* restart */
 			goto top;
-		}
 
 		/* this may be the last reference */
 		__dlm_put_mle(mle);
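
A note on the control flow: dlm_reset_mleres_owner() signals the rare
lock-ordering case through its return value. A non-NULL lockres means
dlm->master_lock was dropped and retaken inside the helper, so the cursor
held by list_for_each_entry_safe() is stale and the caller must restart
from the head. The toy program below is a minimal, self-contained
userspace sketch of that idiom; every name in it is hypothetical and none
of it is ocfs2 code.

#include <stdio.h>
#include <stdlib.h>

/* Toy illustration of "restart from the head": when processing an
 * entry mutates the list out from under the scan (standing in for the
 * helper dropping the lock that protects it), the cursor is stale and
 * the only safe continuation is to rescan from the head. */

struct node {
	int val;
	struct node *next;
};

/* Pretend handler: returns 1 if it mutated the list (here, removed
 * the node), mirroring the non-NULL return of the kernel helper. */
static int handle(struct node **head, struct node *n)
{
	if (n->val % 2 == 0) {		/* remove even values */
		struct node **pp = head;
		while (*pp != n)
			pp = &(*pp)->next;
		*pp = n->next;
		free(n);
		return 1;		/* caller's cursor is now stale */
	}
	return 0;
}

int main(void)
{
	struct node *head = NULL, *n;
	int i;

	for (i = 5; i >= 1; i--) {	/* build list 1..5 */
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

restart:
	for (n = head; n; n = n->next) {
		if (handle(&head, n))
			goto restart;	/* n is stale; rescan from head */
	}

	for (n = head; n; n = n->next)
		printf("%d\n", n->val);	/* prints 1 3 5 */
	return 0;
}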