path: root/fs/ocfs2/dlm/dlmrecovery.c
author    Mark Fasheh <mark.fasheh@oracle.com>    2006-02-28 20:31:22 -0500
committer Mark Fasheh <mark.fasheh@oracle.com>    2006-03-01 15:18:16 -0500
commit    81f2094a631df1ba275f4d4bd7ea5bacfd8dbcfc (patch)
tree      20efc0b486ec9cb260d22dd09f02de13c0e71eb1 /fs/ocfs2/dlm/dlmrecovery.c
parent    b7668c72d2ae004363fb0588600bfa942e1b245c (diff)
[PATCH] ocfs2: use hlists for lockres hash
Switch from list_head to hlist_head. Make the size of the hash dependent upon the allocated area, rather than a constant.

Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Diffstat (limited to 'fs/ocfs2/dlm/dlmrecovery.c')
-rw-r--r--    fs/ocfs2/dlm/dlmrecovery.c    23
1 file changed, 12 insertions(+), 11 deletions(-)
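Before the diff itself, a minimal userspace sketch of the sizing idea described in the commit message: derive the number of hash buckets from the size of the allocated area rather than from a fixed constant. This is an illustration only; HASH_AREA_SIZE, the struct definitions, and all names below are assumptions, not taken from the ocfs2 sources.

/*
 * Hypothetical, self-contained sketch (not kernel code): the bucket
 * count follows from the size of the allocated area, so growing the
 * allocation grows the hash without touching the iteration code.
 */
#include <stdio.h>
#include <stdlib.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define HASH_AREA_SIZE 4096	/* stands in for a one-page allocation */
#define HASH_BUCKETS   (HASH_AREA_SIZE / sizeof(struct hlist_head))

int main(void)
{
	/* The whole area becomes an array of empty single-pointer buckets. */
	struct hlist_head *hash = calloc(HASH_BUCKETS, sizeof(*hash));

	if (!hash)
		return 1;
	printf("hash area %d bytes -> %zu buckets\n",
	       HASH_AREA_SIZE, (size_t)HASH_BUCKETS);
	free(hash);
	return 0;
}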
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index ed76bda1a534..1e232000f3f7 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1693,7 +1693,10 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 				      u8 dead_node, u8 new_master)
 {
 	int i;
-	struct list_head *iter, *iter2, *bucket;
+	struct list_head *iter, *iter2;
+	struct hlist_node *hash_iter;
+	struct hlist_head *bucket;
+
 	struct dlm_lock_resource *res;
 
 	mlog_entry_void();
@@ -1717,10 +1720,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 	 * for now we need to run the whole hash, clear
 	 * the RECOVERING state and set the owner
 	 * if necessary */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
 			if (res->state & DLM_LOCK_RES_RECOVERING) {
 				if (res->owner == dead_node) {
 					mlog(0, "(this=%u) res %.*s owner=%u "
@@ -1852,10 +1854,10 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
 
 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 {
-	struct list_head *iter;
+	struct hlist_node *iter;
 	struct dlm_lock_resource *res;
 	int i;
-	struct list_head *bucket;
+	struct hlist_head *bucket;
 	struct dlm_lock *lock;
 
 
@@ -1876,10 +1878,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
 	 * can be kicked again to see if any ASTs or BASTs
 	 * need to be fired as a result.
 	 */
-	for (i=0; i<DLM_HASH_SIZE; i++) {
-		bucket = &(dlm->resources[i]);
-		list_for_each(iter, bucket) {
-			res = list_entry (iter, struct dlm_lock_resource, list);
+	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
+		bucket = &(dlm->lockres_hash[i]);
+		hlist_for_each_entry(res, iter, bucket, hash_node) {
 			/* always prune any $RECOVERY entries for dead nodes,
 			 * otherwise hangs can occur during later recovery */
 			if (dlm_is_recovery_lock(res->lockname.name,
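For reference, a hedged, self-contained sketch of the iteration pattern the new loops adopt: walk an hlist-style bucket and hand the caller the containing entry directly, so the loop body no longer needs an explicit list_entry() call. The container_of and hlist_for_each_entry helpers here are simplified stand-ins for the kernel's <linux/list.h> macros (the diff uses the real four-argument form); the resource struct and names are hypothetical, and the macro relies on GNU C typeof.

/*
 * Hypothetical userspace sketch of hlist-style bucket iteration.
 */
#include <stdio.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next; };
struct hlist_head { struct hlist_node *first; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Visit each entry of a bucket by its embedded node (GNU C typeof). */
#define hlist_for_each_entry(pos, node, head, member)                        \
	for ((node) = (head)->first;                                          \
	     (node) && ((pos) = container_of((node), typeof(*(pos)), member), 1); \
	     (node) = (node)->next)

struct lock_resource {
	const char *name;
	struct hlist_node hash_node;	/* links the resource into a bucket */
};

int main(void)
{
	struct hlist_head bucket = { 0 };
	struct lock_resource a = { .name = "res-a" };
	struct lock_resource b = { .name = "res-b" };
	struct lock_resource *res;
	struct hlist_node *iter;

	/* Head insertion onto the single bucket. */
	a.hash_node.next = bucket.first; bucket.first = &a.hash_node;
	b.hash_node.next = bucket.first; bucket.first = &b.hash_node;

	hlist_for_each_entry(res, iter, &bucket, hash_node)
		printf("visiting %s\n", res->name);
	return 0;
}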