author     Kurt Hackel <kurt.hackel@oracle.com>   2007-01-05 18:04:49 -0500
committer  Mark Fasheh <mark.fasheh@oracle.com>   2007-02-07 15:02:40 -0500
commit     e17e75ecb86b8ce9b51b219b5348517561031f80 (patch)
tree       358001a91077fc9e62bd8def833f622d93c365cf
parent     71ac1062435ba2d58bf64817b47a6e44f316752e (diff)
ocfs2_dlm: Fix migrate lockres handler queue scanning
The migrate lockres handler searched for its lock on the migrated
lockres only on the expected queue. This is problematic because the
new master could also have issued a convert request during the
migration, moving the lock to the converting queue. We now search
for the lock on all three queues.
Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Sunil Mushran <Sunil.Mushran@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
 fs/ocfs2/dlm/dlmrecovery.c | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 3057b65a4b8b..f93315c98871 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1708,10 +1708,11 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 {
         struct dlm_migratable_lock *ml;
         struct list_head *queue;
+        struct list_head *tmpq = NULL;
         struct dlm_lock *newlock = NULL;
         struct dlm_lockstatus *lksb = NULL;
         int ret = 0;
-        int i, bad;
+        int i, j, bad;
         struct list_head *iter;
         struct dlm_lock *lock = NULL;
         u8 from = O2NM_MAX_NODES;
@@ -1738,6 +1739,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                 lksb = NULL;
 
                 queue = dlm_list_num_to_pointer(res, ml->list);
+                tmpq = NULL;
 
                 /* if the lock is for the local node it needs to
                  * be moved to the proper location within the queue.
@@ -1747,11 +1749,16 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                         BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
 
                         spin_lock(&res->spinlock);
-                        list_for_each(iter, queue) {
-                                lock = list_entry (iter, struct dlm_lock, list);
-                                if (lock->ml.cookie != ml->cookie)
-                                        lock = NULL;
-                                else
+                        for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
+                                tmpq = dlm_list_idx_to_ptr(res, j);
+                                list_for_each(iter, tmpq) {
+                                        lock = list_entry (iter, struct dlm_lock, list);
+                                        if (lock->ml.cookie != ml->cookie)
+                                                lock = NULL;
+                                        else
+                                                break;
+                                }
+                                if (lock)
                                         break;
                         }
 
@@ -1768,6 +1775,13 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                         }
                         BUG_ON(lock->ml.node != ml->node);
 
+                        if (tmpq != queue) {
+                                mlog(0, "lock was on %u instead of %u for %.*s\n",
+                                     j, ml->list, res->lockname.len, res->lockname.name);
+                                spin_unlock(&res->spinlock);
+                                continue;
+                        }
+
                         /* see NOTE above about why we do not update
                          * to match the master here */
 
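
For context, here is a minimal standalone sketch (plain C, not kernel code) of the scanning pattern the fix introduces: look for the matching cookie on all three queues rather than only the one named in the migration message, and flag the case where the lock turns up on a different queue than expected. All names below (fake_lock, find_lock_any_queue, etc.) are illustrative, not part of ocfs2.

/* Simplified illustration of the "scan all three queues" fix. */
#include <stdio.h>

enum { GRANTED_LIST, CONVERTING_LIST, BLOCKED_LIST, NUM_LISTS };

struct fake_lock {
        unsigned long long cookie;
        struct fake_lock *next;
};

struct fake_lockres {
        /* three singly linked queues standing in for the kernel's list_heads */
        struct fake_lock *queues[NUM_LISTS];
};

/* Search every queue for the lock with the given cookie; report which
 * queue it was found on so the caller can compare it to the expected one. */
static struct fake_lock *find_lock_any_queue(struct fake_lockres *res,
                                             unsigned long long cookie,
                                             int *found_on)
{
        for (int j = GRANTED_LIST; j <= BLOCKED_LIST; j++) {
                for (struct fake_lock *l = res->queues[j]; l; l = l->next) {
                        if (l->cookie == cookie) {
                                *found_on = j;
                                return l;
                        }
                }
        }
        return NULL;
}

int main(void)
{
        /* The lock we want ended up on the blocked queue... */
        struct fake_lock blocked = { .cookie = 42, .next = NULL };
        struct fake_lockres res = { .queues = { NULL, NULL, &blocked } };

        /* ...but the (fake) migration message claims it is on the granted queue. */
        int expected = GRANTED_LIST;
        int found_on;
        struct fake_lock *l = find_lock_any_queue(&res, 42, &found_on);

        if (l && found_on != expected)
                printf("lock was on %d instead of %d\n", found_on, expected);
        return 0;
}

In the real handler the mismatch case additionally drops res->spinlock and skips further processing of that lock; the sketch only shows the search-and-compare shape of the change.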