author | Sunil Mushran <sunil.mushran@oracle.com> | 2009-02-26 18:00:42 -0500
committer | Mark Fasheh <mfasheh@suse.com> | 2009-04-03 14:39:21 -0400
commit | 67ae1f0604da3bcf3ed6dec59ac71d07e54a404c (patch)
tree | dd97a77e0d718d6d154c41a7c5178c1cfcb709b6 /fs/ocfs2/dlm/dlmmaster.c
parent | 2ed6c750d645d09b5948e46fada3ca1fda3157b5 (diff)
ocfs2/dlm: Indent dlm_cleanup_master_list()
The previous patch explicitly did not indent dlm_cleanup_master_list()
so as to make the patch readable. This patch properly indents the
function.
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
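The change is purely structural: the previous patch added an extra loop level (a walk of each hash bucket's list via hlist_for_each()) but deliberately left the per-entry handling at its old depth, and this patch pushes that body one level deeper so it sits inside both loops. The following is a minimal, self-contained C sketch of that nesting only, not the kernel code; the bucket count, entry type, and list layout are invented for illustration. The inner loop's body corresponds to the block that the diff below re-indents.

#include <stdio.h>

#define NUM_BUCKETS 4                      /* invented size, stand-in for the DLM hash size */

struct entry {                             /* stand-in for a master list entry */
	int type;
	struct entry *next;
};

static struct entry *buckets[NUM_BUCKETS]; /* stand-in for the master hash */

static void clean_entries(void)
{
	for (int i = 0; i < NUM_BUCKETS; i++) {
		/* outer loop: one pass per hash bucket */
		for (struct entry *e = buckets[i]; e; e = e->next) {
			/* inner walk of one bucket, like hlist_for_each();
			 * everything from here down is the body that the
			 * patch moves one indentation level deeper */
			if (e->type == 0)
				continue;
			printf("cleaning entry of type %d\n", e->type);
		}
	}
}

int main(void)
{
	static struct entry a = { .type = 1 }, b = { .type = 0 };
	buckets[0] = &a;
	buckets[2] = &b;
	clean_entries();
	return 0;
}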
Diffstat (limited to 'fs/ocfs2/dlm/dlmmaster.c')
-rw-r--r-- | fs/ocfs2/dlm/dlmmaster.c | 106
1 file changed, 52 insertions, 54 deletions
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 804558174a77..604552ebb468 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -3320,66 +3320,64 @@ top:
                         mle = hlist_entry(list, struct dlm_master_list_entry,
                                           master_hash_node);

-               BUG_ON(mle->type != DLM_MLE_BLOCK &&
-                      mle->type != DLM_MLE_MASTER &&
-                      mle->type != DLM_MLE_MIGRATION);
+                       BUG_ON(mle->type != DLM_MLE_BLOCK &&
+                              mle->type != DLM_MLE_MASTER &&
+                              mle->type != DLM_MLE_MIGRATION);

-               /* MASTER mles are initiated locally. the waiting
-                * process will notice the node map change
-                * shortly. let that happen as normal. */
-               if (mle->type == DLM_MLE_MASTER)
-                       continue;
+                       /* MASTER mles are initiated locally. The waiting
+                        * process will notice the node map change shortly.
+                        * Let that happen as normal. */
+                       if (mle->type == DLM_MLE_MASTER)
+                               continue;

-
-               /* BLOCK mles are initiated by other nodes.
-                * need to clean up if the dead node would have
-                * been the master. */
-               if (mle->type == DLM_MLE_BLOCK) {
-                       dlm_clean_block_mle(dlm, mle, dead_node);
-                       continue;
-               }
-
-               /* everything else is a MIGRATION mle */
-
-               /* the rule for MIGRATION mles is that the master
-                * becomes UNKNOWN if *either* the original or
-                * the new master dies. all UNKNOWN lockreses
-                * are sent to whichever node becomes the recovery
-                * master. the new master is responsible for
-                * determining if there is still a master for
-                * this lockres, or if he needs to take over
-                * mastery. either way, this node should expect
-                * another message to resolve this. */
-               if (mle->master != dead_node &&
-                   mle->new_master != dead_node)
-                       continue;
-
-               /* if we have reached this point, this mle needs to
-                * be removed from the list and freed. */
-               dlm_clean_migration_mle(dlm, mle);
-
-               mlog(0, "%s: node %u died during migration from "
-                    "%u to %u!\n", dlm->name, dead_node,
-                    mle->master, mle->new_master);
-
-               /* If we find a lockres associated with the mle, we've
-                * hit this rare case that messes up our lock ordering.
-                * If so, we need to drop the master lock so that we can
-                * take the lockres lock, meaning that we will have to
-                * restart from the head of list. */
-               res = dlm_reset_mleres_owner(dlm, mle);
-               if (res)
-                       /* restart */
-                       goto top;
-
-               /* this may be the last reference */
-               __dlm_put_mle(mle);
-       }
+                       /* BLOCK mles are initiated by other nodes. Need to
+                        * clean up if the dead node would have been the
+                        * master. */
+                       if (mle->type == DLM_MLE_BLOCK) {
+                               dlm_clean_block_mle(dlm, mle, dead_node);
+                               continue;
+                       }
+
+                       /* Everything else is a MIGRATION mle */
+
+                       /* The rule for MIGRATION mles is that the master
+                        * becomes UNKNOWN if *either* the original or the new
+                        * master dies. All UNKNOWN lockres' are sent to
+                        * whichever node becomes the recovery master. The new
+                        * master is responsible for determining if there is
+                        * still a master for this lockres, or if he needs to
+                        * take over mastery. Either way, this node should
+                        * expect another message to resolve this. */
+
+                       if (mle->master != dead_node &&
+                           mle->new_master != dead_node)
+                               continue;
+
+                       /* If we have reached this point, this mle needs to be
+                        * removed from the list and freed. */
+                       dlm_clean_migration_mle(dlm, mle);
+
+                       mlog(0, "%s: node %u died during migration from "
+                            "%u to %u!\n", dlm->name, dead_node, mle->master,
+                            mle->new_master);
+
+                       /* If we find a lockres associated with the mle, we've
+                        * hit this rare case that messes up our lock ordering.
+                        * If so, we need to drop the master lock so that we can
+                        * take the lockres lock, meaning that we will have to
+                        * restart from the head of list. */
+                       res = dlm_reset_mleres_owner(dlm, mle);
+                       if (res)
+                               /* restart */
+                               goto top;
+
+                       /* This may be the last reference */
+                       __dlm_put_mle(mle);
+               }
        }
        spin_unlock(&dlm->master_lock);
 }

-
 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         u8 old_master)
 {