author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-11 14:28:34 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-11 14:28:34 -0500
commit | 498f7f505dc79934c878c7667840c50c64f232fc (patch)
tree | 67eca6dcb6fe76ec3d2bdef5e3102591fe957776 /fs/ocfs2
parent | 0969d11e201b82d30a158ccdb3aca67a7b845613 (diff)
parent | d6351db2073315ddebac72cc1935e912f60f86e0 (diff)
Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2
* 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2: (22 commits)
MAINTAINERS: Update Joel Becker's email address
ocfs2: Remove unused truncate function from alloc.c
ocfs2/cluster: dereferencing before checking in nst_seq_show()
ocfs2: fix build for OCFS2_FS_STATS not enabled
ocfs2/cluster: Show o2net timing statistics
ocfs2/cluster: Track process message timing stats for each socket
ocfs2/cluster: Track send message timing stats for each socket
ocfs2/cluster: Use ktime instead of timeval in struct o2net_sock_container
ocfs2/cluster: Replace timeval with ktime in struct o2net_send_tracking
ocfs2: Add DEBUG_FS dependency
ocfs2/dlm: Hard code the values for enums
ocfs2/dlm: Minor cleanup
ocfs2/dlm: Cleanup dlmdebug.c
ocfs2: Release buffer_head in case of error in ocfs2_double_lock.
ocfs2/cluster: Pin the local node when o2hb thread starts
ocfs2/cluster: Show pin state for each o2hb region
ocfs2/cluster: Pin/unpin o2hb regions
ocfs2/cluster: Remove dropped region from o2hb quorum region bitmap
ocfs2/cluster: Pin the remote node item in configfs
ocfs2/dlm: make existing convertion precedent over new lock
...
Diffstat (limited to 'fs/ocfs2')
-rw-r--r-- | fs/ocfs2/Kconfig | 2
-rw-r--r-- | fs/ocfs2/alloc.c | 77
-rw-r--r-- | fs/ocfs2/alloc.h | 4
-rw-r--r-- | fs/ocfs2/aops.c | 59
-rw-r--r-- | fs/ocfs2/cluster/heartbeat.c | 246
-rw-r--r-- | fs/ocfs2/cluster/netdebug.c | 286
-rw-r--r-- | fs/ocfs2/cluster/tcp.c | 145
-rw-r--r-- | fs/ocfs2/cluster/tcp_internal.h | 33
-rw-r--r-- | fs/ocfs2/dlm/dlmast.c | 76
-rw-r--r-- | fs/ocfs2/dlm/dlmcommon.h | 86
-rw-r--r-- | fs/ocfs2/dlm/dlmdebug.c | 200
-rw-r--r-- | fs/ocfs2/dlm/dlmdebug.h | 5
-rw-r--r-- | fs/ocfs2/dlm/dlmdomain.c | 10
-rw-r--r-- | fs/ocfs2/dlm/dlmlock.c | 3
-rw-r--r-- | fs/ocfs2/dlm/dlmthread.c | 132
-rw-r--r-- | fs/ocfs2/namei.c | 5
-rw-r--r-- | fs/ocfs2/ocfs2.h | 5
17 files changed, 842 insertions, 532 deletions
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index 0d840669698e..ab152c00cd3a 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig | |||
@@ -51,7 +51,7 @@ config OCFS2_FS_USERSPACE_CLUSTER | |||
51 | 51 | ||
52 | config OCFS2_FS_STATS | 52 | config OCFS2_FS_STATS |
53 | bool "OCFS2 statistics" | 53 | bool "OCFS2 statistics" |
54 | depends on OCFS2_FS | 54 | depends on OCFS2_FS && DEBUG_FS |
55 | default y | 55 | default y |
56 | help | 56 | help |
57 | This option allows some fs statistics to be captured. Enabling | 57 | This option allows some fs statistics to be captured. Enabling |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 592fae5007d1..e4984e259cb6 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -565,7 +565,6 @@ static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et) | |||
565 | return ret; | 565 | return ret; |
566 | } | 566 | } |
567 | 567 | ||
568 | static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc); | ||
569 | static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, | 568 | static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, |
570 | struct ocfs2_extent_block *eb); | 569 | struct ocfs2_extent_block *eb); |
571 | static void ocfs2_adjust_rightmost_records(handle_t *handle, | 570 | static void ocfs2_adjust_rightmost_records(handle_t *handle, |
@@ -5858,6 +5857,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, | |||
5858 | 5857 | ||
5859 | ocfs2_journal_dirty(handle, tl_bh); | 5858 | ocfs2_journal_dirty(handle, tl_bh); |
5860 | 5859 | ||
5860 | osb->truncated_clusters += num_clusters; | ||
5861 | bail: | 5861 | bail: |
5862 | mlog_exit(status); | 5862 | mlog_exit(status); |
5863 | return status; | 5863 | return status; |
@@ -5929,6 +5929,8 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, | |||
5929 | i--; | 5929 | i--; |
5930 | } | 5930 | } |
5931 | 5931 | ||
5932 | osb->truncated_clusters = 0; | ||
5933 | |||
5932 | bail: | 5934 | bail: |
5933 | mlog_exit(status); | 5935 | mlog_exit(status); |
5934 | return status; | 5936 | return status; |
@@ -7139,64 +7141,6 @@ bail: | |||
7139 | } | 7141 | } |
7140 | 7142 | ||
7141 | /* | 7143 | /* |
7142 | * Expects the inode to already be locked. | ||
7143 | */ | ||
7144 | int ocfs2_prepare_truncate(struct ocfs2_super *osb, | ||
7145 | struct inode *inode, | ||
7146 | struct buffer_head *fe_bh, | ||
7147 | struct ocfs2_truncate_context **tc) | ||
7148 | { | ||
7149 | int status; | ||
7150 | unsigned int new_i_clusters; | ||
7151 | struct ocfs2_dinode *fe; | ||
7152 | struct ocfs2_extent_block *eb; | ||
7153 | struct buffer_head *last_eb_bh = NULL; | ||
7154 | |||
7155 | mlog_entry_void(); | ||
7156 | |||
7157 | *tc = NULL; | ||
7158 | |||
7159 | new_i_clusters = ocfs2_clusters_for_bytes(osb->sb, | ||
7160 | i_size_read(inode)); | ||
7161 | fe = (struct ocfs2_dinode *) fe_bh->b_data; | ||
7162 | |||
7163 | mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size =" | ||
7164 | "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters, | ||
7165 | (unsigned long long)le64_to_cpu(fe->i_size)); | ||
7166 | |||
7167 | *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL); | ||
7168 | if (!(*tc)) { | ||
7169 | status = -ENOMEM; | ||
7170 | mlog_errno(status); | ||
7171 | goto bail; | ||
7172 | } | ||
7173 | ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); | ||
7174 | |||
7175 | if (fe->id2.i_list.l_tree_depth) { | ||
7176 | status = ocfs2_read_extent_block(INODE_CACHE(inode), | ||
7177 | le64_to_cpu(fe->i_last_eb_blk), | ||
7178 | &last_eb_bh); | ||
7179 | if (status < 0) { | ||
7180 | mlog_errno(status); | ||
7181 | goto bail; | ||
7182 | } | ||
7183 | eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; | ||
7184 | } | ||
7185 | |||
7186 | (*tc)->tc_last_eb_bh = last_eb_bh; | ||
7187 | |||
7188 | status = 0; | ||
7189 | bail: | ||
7190 | if (status < 0) { | ||
7191 | if (*tc) | ||
7192 | ocfs2_free_truncate_context(*tc); | ||
7193 | *tc = NULL; | ||
7194 | } | ||
7195 | mlog_exit_void(); | ||
7196 | return status; | ||
7197 | } | ||
7198 | |||
7199 | /* | ||
7200 | * 'start' is inclusive, 'end' is not. | 7144 | * 'start' is inclusive, 'end' is not. |
7201 | */ | 7145 | */ |
7202 | int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, | 7146 | int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh, |
@@ -7270,18 +7214,3 @@ out_commit: | |||
7270 | out: | 7214 | out: |
7271 | return ret; | 7215 | return ret; |
7272 | } | 7216 | } |
7273 | |||
7274 | static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc) | ||
7275 | { | ||
7276 | /* | ||
7277 | * The caller is responsible for completing deallocation | ||
7278 | * before freeing the context. | ||
7279 | */ | ||
7280 | if (tc->tc_dealloc.c_first_suballocator != NULL) | ||
7281 | mlog(ML_NOTICE, | ||
7282 | "Truncate completion has non-empty dealloc context\n"); | ||
7283 | |||
7284 | brelse(tc->tc_last_eb_bh); | ||
7285 | |||
7286 | kfree(tc); | ||
7287 | } | ||
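
The two small alloc.c hunks above add the bookkeeping the later aops.c hunks rely on: ocfs2_truncate_log_append() now adds num_clusters to osb->truncated_clusters, and ocfs2_replay_truncate_records() resets the counter once the logged clusters are actually freed. A minimal sketch of that counter; toy_osb is a stand-in for ocfs2_super, reduced to the one field that matters here (the real counter is accessed under the truncate-log inode's i_mutex):

```c
/* Reduced model of the truncated_clusters bookkeeping added above. */
struct toy_osb {
	unsigned int truncated_clusters;	/* clusters parked in the truncate log */
};

/* mirrors the increment in ocfs2_truncate_log_append() */
static void toy_tl_append(struct toy_osb *osb, unsigned int num_clusters)
{
	osb->truncated_clusters += num_clusters;
}

/* mirrors the reset in ocfs2_replay_truncate_records() */
static void toy_tl_replayed(struct toy_osb *osb)
{
	osb->truncated_clusters = 0;
}
```

The write path (aops.c below) reads this counter to decide whether flushing the truncate log can possibly satisfy a failed allocation.
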
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 55762b554b99..3bd08a03251c 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h | |||
@@ -228,10 +228,6 @@ struct ocfs2_truncate_context { | |||
228 | 228 | ||
229 | int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, | 229 | int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, |
230 | u64 range_start, u64 range_end); | 230 | u64 range_start, u64 range_end); |
231 | int ocfs2_prepare_truncate(struct ocfs2_super *osb, | ||
232 | struct inode *inode, | ||
233 | struct buffer_head *fe_bh, | ||
234 | struct ocfs2_truncate_context **tc); | ||
235 | int ocfs2_commit_truncate(struct ocfs2_super *osb, | 231 | int ocfs2_commit_truncate(struct ocfs2_super *osb, |
236 | struct inode *inode, | 232 | struct inode *inode, |
237 | struct buffer_head *di_bh); | 233 | struct buffer_head *di_bh); |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 0d7c5540ad66..1fbb0e20131b 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -1630,6 +1630,43 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, | |||
1630 | return ret; | 1630 | return ret; |
1631 | } | 1631 | } |
1632 | 1632 | ||
1633 | /* | ||
1634 | * Try to flush truncate logs if we can free enough clusters from it. | ||
1635 | * As for return value, "< 0" means error, "0" no space and "1" means | ||
1636 | * we have freed enough spaces and let the caller try to allocate again. | ||
1637 | */ | ||
1638 | static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, | ||
1639 | unsigned int needed) | ||
1640 | { | ||
1641 | tid_t target; | ||
1642 | int ret = 0; | ||
1643 | unsigned int truncated_clusters; | ||
1644 | |||
1645 | mutex_lock(&osb->osb_tl_inode->i_mutex); | ||
1646 | truncated_clusters = osb->truncated_clusters; | ||
1647 | mutex_unlock(&osb->osb_tl_inode->i_mutex); | ||
1648 | |||
1649 | /* | ||
1650 | * Check whether we can succeed in allocating if we free | ||
1651 | * the truncate log. | ||
1652 | */ | ||
1653 | if (truncated_clusters < needed) | ||
1654 | goto out; | ||
1655 | |||
1656 | ret = ocfs2_flush_truncate_log(osb); | ||
1657 | if (ret) { | ||
1658 | mlog_errno(ret); | ||
1659 | goto out; | ||
1660 | } | ||
1661 | |||
1662 | if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) { | ||
1663 | jbd2_log_wait_commit(osb->journal->j_journal, target); | ||
1664 | ret = 1; | ||
1665 | } | ||
1666 | out: | ||
1667 | return ret; | ||
1668 | } | ||
1669 | |||
1633 | int ocfs2_write_begin_nolock(struct file *filp, | 1670 | int ocfs2_write_begin_nolock(struct file *filp, |
1634 | struct address_space *mapping, | 1671 | struct address_space *mapping, |
1635 | loff_t pos, unsigned len, unsigned flags, | 1672 | loff_t pos, unsigned len, unsigned flags, |
@@ -1637,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct file *filp, | |||
1637 | struct buffer_head *di_bh, struct page *mmap_page) | 1674 | struct buffer_head *di_bh, struct page *mmap_page) |
1638 | { | 1675 | { |
1639 | int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; | 1676 | int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; |
1640 | unsigned int clusters_to_alloc, extents_to_split; | 1677 | unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; |
1641 | struct ocfs2_write_ctxt *wc; | 1678 | struct ocfs2_write_ctxt *wc; |
1642 | struct inode *inode = mapping->host; | 1679 | struct inode *inode = mapping->host; |
1643 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | 1680 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
@@ -1646,7 +1683,9 @@ int ocfs2_write_begin_nolock(struct file *filp, | |||
1646 | struct ocfs2_alloc_context *meta_ac = NULL; | 1683 | struct ocfs2_alloc_context *meta_ac = NULL; |
1647 | handle_t *handle; | 1684 | handle_t *handle; |
1648 | struct ocfs2_extent_tree et; | 1685 | struct ocfs2_extent_tree et; |
1686 | int try_free = 1, ret1; | ||
1649 | 1687 | ||
1688 | try_again: | ||
1650 | ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh); | 1689 | ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh); |
1651 | if (ret) { | 1690 | if (ret) { |
1652 | mlog_errno(ret); | 1691 | mlog_errno(ret); |
@@ -1681,6 +1720,7 @@ int ocfs2_write_begin_nolock(struct file *filp, | |||
1681 | mlog_errno(ret); | 1720 | mlog_errno(ret); |
1682 | goto out; | 1721 | goto out; |
1683 | } else if (ret == 1) { | 1722 | } else if (ret == 1) { |
1723 | clusters_need = wc->w_clen; | ||
1684 | ret = ocfs2_refcount_cow(inode, filp, di_bh, | 1724 | ret = ocfs2_refcount_cow(inode, filp, di_bh, |
1685 | wc->w_cpos, wc->w_clen, UINT_MAX); | 1725 | wc->w_cpos, wc->w_clen, UINT_MAX); |
1686 | if (ret) { | 1726 | if (ret) { |
@@ -1695,6 +1735,7 @@ int ocfs2_write_begin_nolock(struct file *filp, | |||
1695 | mlog_errno(ret); | 1735 | mlog_errno(ret); |
1696 | goto out; | 1736 | goto out; |
1697 | } | 1737 | } |
1738 | clusters_need += clusters_to_alloc; | ||
1698 | 1739 | ||
1699 | di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; | 1740 | di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; |
1700 | 1741 | ||
@@ -1817,6 +1858,22 @@ out: | |||
1817 | ocfs2_free_alloc_context(data_ac); | 1858 | ocfs2_free_alloc_context(data_ac); |
1818 | if (meta_ac) | 1859 | if (meta_ac) |
1819 | ocfs2_free_alloc_context(meta_ac); | 1860 | ocfs2_free_alloc_context(meta_ac); |
1861 | |||
1862 | if (ret == -ENOSPC && try_free) { | ||
1863 | /* | ||
1864 | * Try to free some truncate log so that we can have enough | ||
1865 | * clusters to allocate. | ||
1866 | */ | ||
1867 | try_free = 0; | ||
1868 | |||
1869 | ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need); | ||
1870 | if (ret1 == 1) | ||
1871 | goto try_again; | ||
1872 | |||
1873 | if (ret1 < 0) | ||
1874 | mlog_errno(ret1); | ||
1875 | } | ||
1876 | |||
1820 | return ret; | 1877 | return ret; |
1821 | } | 1878 | } |
1822 | 1879 | ||
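
Taken together, the aops.c hunks implement a single retry on -ENOSPC: ocfs2_write_begin_nolock() records how many clusters the write needs, and if allocation fails it calls ocfs2_try_to_free_truncate_log(), which flushes the truncate log and waits for the journal commit that releases the clusters before jumping back to try_again. A hedged sketch of that control flow; reserve_clusters() and try_to_free_truncate_log() are simplified stand-ins for the real ocfs2 routines, not functions from the patch:

```c
#include <linux/errno.h>

/* hypothetical stand-ins for the real allocation and truncate-log paths */
static int reserve_clusters(unsigned int clusters_need);
/* returns 1 if enough clusters were freed, 0 if not, < 0 on error */
static int try_to_free_truncate_log(unsigned int clusters_need);

static int write_begin_with_retry(unsigned int clusters_need)
{
	int ret, ret1, try_free = 1;

try_again:
	ret = reserve_clusters(clusters_need);		/* may fail with -ENOSPC */
	if (ret == -ENOSPC && try_free) {
		try_free = 0;				/* never loop more than once */

		ret1 = try_to_free_truncate_log(clusters_need);
		if (ret1 == 1)
			goto try_again;
		/* on 0 or an error, fall through and return the original -ENOSPC */
	}
	return ret;
}
```

The single try_free flag keeps the path from spinning when the flush cannot free enough space.
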
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9e3d45bcb5fd..a6cc05302e9f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -82,6 +82,7 @@ static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; | |||
82 | #define O2HB_DB_TYPE_REGION_LIVENODES 4 | 82 | #define O2HB_DB_TYPE_REGION_LIVENODES 4 |
83 | #define O2HB_DB_TYPE_REGION_NUMBER 5 | 83 | #define O2HB_DB_TYPE_REGION_NUMBER 5 |
84 | #define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6 | 84 | #define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6 |
85 | #define O2HB_DB_TYPE_REGION_PINNED 7 | ||
85 | struct o2hb_debug_buf { | 86 | struct o2hb_debug_buf { |
86 | int db_type; | 87 | int db_type; |
87 | int db_size; | 88 | int db_size; |
@@ -101,6 +102,7 @@ static struct o2hb_debug_buf *o2hb_db_failedregions; | |||
101 | #define O2HB_DEBUG_FAILEDREGIONS "failed_regions" | 102 | #define O2HB_DEBUG_FAILEDREGIONS "failed_regions" |
102 | #define O2HB_DEBUG_REGION_NUMBER "num" | 103 | #define O2HB_DEBUG_REGION_NUMBER "num" |
103 | #define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms" | 104 | #define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms" |
105 | #define O2HB_DEBUG_REGION_PINNED "pinned" | ||
104 | 106 | ||
105 | static struct dentry *o2hb_debug_dir; | 107 | static struct dentry *o2hb_debug_dir; |
106 | static struct dentry *o2hb_debug_livenodes; | 108 | static struct dentry *o2hb_debug_livenodes; |
@@ -132,6 +134,33 @@ char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = { | |||
132 | unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; | 134 | unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; |
133 | unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL; | 135 | unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL; |
134 | 136 | ||
137 | /* | ||
138 | * o2hb_dependent_users tracks the number of registered callbacks that depend | ||
139 | * on heartbeat. o2net and o2dlm are two entities that register this callback. | ||
140 | * However only o2dlm depends on the heartbeat. It does not want the heartbeat | ||
141 | * to stop while a dlm domain is still active. | ||
142 | */ | ||
143 | unsigned int o2hb_dependent_users; | ||
144 | |||
145 | /* | ||
146 | * In global heartbeat mode, all regions are pinned if there are one or more | ||
147 | * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All | ||
148 | * regions are unpinned if the region count exceeds the cut off or the number | ||
149 | * of dependent users falls to zero. | ||
150 | */ | ||
151 | #define O2HB_PIN_CUT_OFF 3 | ||
152 | |||
153 | /* | ||
154 | * In local heartbeat mode, we assume the dlm domain name to be the same as | ||
155 | * region uuid. This is true for domains created for the file system but not | ||
156 | * necessarily true for userdlm domains. This is a known limitation. | ||
157 | * | ||
158 | * In global heartbeat mode, we pin/unpin all o2hb regions. This solution | ||
159 | * works for both file system and userdlm domains. | ||
160 | */ | ||
161 | static int o2hb_region_pin(const char *region_uuid); | ||
162 | static void o2hb_region_unpin(const char *region_uuid); | ||
163 | |||
135 | /* Only sets a new threshold if there are no active regions. | 164 | /* Only sets a new threshold if there are no active regions. |
136 | * | 165 | * |
137 | * No locking or otherwise interesting code is required for reading | 166 | * No locking or otherwise interesting code is required for reading |
@@ -186,7 +215,9 @@ struct o2hb_region { | |||
186 | struct config_item hr_item; | 215 | struct config_item hr_item; |
187 | 216 | ||
188 | struct list_head hr_all_item; | 217 | struct list_head hr_all_item; |
189 | unsigned hr_unclean_stop:1; | 218 | unsigned hr_unclean_stop:1, |
219 | hr_item_pinned:1, | ||
220 | hr_item_dropped:1; | ||
190 | 221 | ||
191 | /* protected by the hr_callback_sem */ | 222 | /* protected by the hr_callback_sem */ |
192 | struct task_struct *hr_task; | 223 | struct task_struct *hr_task; |
@@ -212,9 +243,11 @@ struct o2hb_region { | |||
212 | struct dentry *hr_debug_livenodes; | 243 | struct dentry *hr_debug_livenodes; |
213 | struct dentry *hr_debug_regnum; | 244 | struct dentry *hr_debug_regnum; |
214 | struct dentry *hr_debug_elapsed_time; | 245 | struct dentry *hr_debug_elapsed_time; |
246 | struct dentry *hr_debug_pinned; | ||
215 | struct o2hb_debug_buf *hr_db_livenodes; | 247 | struct o2hb_debug_buf *hr_db_livenodes; |
216 | struct o2hb_debug_buf *hr_db_regnum; | 248 | struct o2hb_debug_buf *hr_db_regnum; |
217 | struct o2hb_debug_buf *hr_db_elapsed_time; | 249 | struct o2hb_debug_buf *hr_db_elapsed_time; |
250 | struct o2hb_debug_buf *hr_db_pinned; | ||
218 | 251 | ||
219 | /* let the person setting up hb wait for it to return until it | 252 | /* let the person setting up hb wait for it to return until it |
220 | * has reached a 'steady' state. This will be fixed when we have | 253 | * has reached a 'steady' state. This will be fixed when we have |
@@ -701,6 +734,14 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, | |||
701 | config_item_name(®->hr_item)); | 734 | config_item_name(®->hr_item)); |
702 | 735 | ||
703 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | 736 | set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); |
737 | |||
738 | /* | ||
739 | * If global heartbeat active, unpin all regions if the | ||
740 | * region count > CUT_OFF | ||
741 | */ | ||
742 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | ||
743 | O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) | ||
744 | o2hb_region_unpin(NULL); | ||
704 | } | 745 | } |
705 | 746 | ||
706 | static int o2hb_check_slot(struct o2hb_region *reg, | 747 | static int o2hb_check_slot(struct o2hb_region *reg, |
@@ -1041,6 +1082,9 @@ static int o2hb_thread(void *data) | |||
1041 | 1082 | ||
1042 | set_user_nice(current, -20); | 1083 | set_user_nice(current, -20); |
1043 | 1084 | ||
1085 | /* Pin node */ | ||
1086 | o2nm_depend_this_node(); | ||
1087 | |||
1044 | while (!kthread_should_stop() && !reg->hr_unclean_stop) { | 1088 | while (!kthread_should_stop() && !reg->hr_unclean_stop) { |
1045 | /* We track the time spent inside | 1089 | /* We track the time spent inside |
1046 | * o2hb_do_disk_heartbeat so that we avoid more than | 1090 | * o2hb_do_disk_heartbeat so that we avoid more than |
@@ -1090,6 +1134,9 @@ static int o2hb_thread(void *data) | |||
1090 | mlog_errno(ret); | 1134 | mlog_errno(ret); |
1091 | } | 1135 | } |
1092 | 1136 | ||
1137 | /* Unpin node */ | ||
1138 | o2nm_undepend_this_node(); | ||
1139 | |||
1093 | mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); | 1140 | mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); |
1094 | 1141 | ||
1095 | return 0; | 1142 | return 0; |
@@ -1142,6 +1189,12 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) | |||
1142 | reg->hr_last_timeout_start)); | 1189 | reg->hr_last_timeout_start)); |
1143 | goto done; | 1190 | goto done; |
1144 | 1191 | ||
1192 | case O2HB_DB_TYPE_REGION_PINNED: | ||
1193 | reg = (struct o2hb_region *)db->db_data; | ||
1194 | out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", | ||
1195 | !!reg->hr_item_pinned); | ||
1196 | goto done; | ||
1197 | |||
1145 | default: | 1198 | default: |
1146 | goto done; | 1199 | goto done; |
1147 | } | 1200 | } |
@@ -1315,6 +1368,8 @@ int o2hb_init(void) | |||
1315 | memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); | 1368 | memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); |
1316 | memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); | 1369 | memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); |
1317 | 1370 | ||
1371 | o2hb_dependent_users = 0; | ||
1372 | |||
1318 | return o2hb_debug_init(); | 1373 | return o2hb_debug_init(); |
1319 | } | 1374 | } |
1320 | 1375 | ||
@@ -1384,6 +1439,7 @@ static void o2hb_region_release(struct config_item *item) | |||
1384 | debugfs_remove(reg->hr_debug_livenodes); | 1439 | debugfs_remove(reg->hr_debug_livenodes); |
1385 | debugfs_remove(reg->hr_debug_regnum); | 1440 | debugfs_remove(reg->hr_debug_regnum); |
1386 | debugfs_remove(reg->hr_debug_elapsed_time); | 1441 | debugfs_remove(reg->hr_debug_elapsed_time); |
1442 | debugfs_remove(reg->hr_debug_pinned); | ||
1387 | debugfs_remove(reg->hr_debug_dir); | 1443 | debugfs_remove(reg->hr_debug_dir); |
1388 | 1444 | ||
1389 | spin_lock(&o2hb_live_lock); | 1445 | spin_lock(&o2hb_live_lock); |
@@ -1948,6 +2004,18 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir) | |||
1948 | goto bail; | 2004 | goto bail; |
1949 | } | 2005 | } |
1950 | 2006 | ||
2007 | reg->hr_debug_pinned = | ||
2008 | o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, | ||
2009 | reg->hr_debug_dir, | ||
2010 | &(reg->hr_db_pinned), | ||
2011 | sizeof(*(reg->hr_db_pinned)), | ||
2012 | O2HB_DB_TYPE_REGION_PINNED, | ||
2013 | 0, 0, reg); | ||
2014 | if (!reg->hr_debug_pinned) { | ||
2015 | mlog_errno(ret); | ||
2016 | goto bail; | ||
2017 | } | ||
2018 | |||
1951 | ret = 0; | 2019 | ret = 0; |
1952 | bail: | 2020 | bail: |
1953 | return ret; | 2021 | return ret; |
@@ -2002,15 +2070,20 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2002 | { | 2070 | { |
2003 | struct task_struct *hb_task; | 2071 | struct task_struct *hb_task; |
2004 | struct o2hb_region *reg = to_o2hb_region(item); | 2072 | struct o2hb_region *reg = to_o2hb_region(item); |
2073 | int quorum_region = 0; | ||
2005 | 2074 | ||
2006 | /* stop the thread when the user removes the region dir */ | 2075 | /* stop the thread when the user removes the region dir */ |
2007 | spin_lock(&o2hb_live_lock); | 2076 | spin_lock(&o2hb_live_lock); |
2008 | if (o2hb_global_heartbeat_active()) { | 2077 | if (o2hb_global_heartbeat_active()) { |
2009 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); | 2078 | clear_bit(reg->hr_region_num, o2hb_region_bitmap); |
2010 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); | 2079 | clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); |
2080 | if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) | ||
2081 | quorum_region = 1; | ||
2082 | clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); | ||
2011 | } | 2083 | } |
2012 | hb_task = reg->hr_task; | 2084 | hb_task = reg->hr_task; |
2013 | reg->hr_task = NULL; | 2085 | reg->hr_task = NULL; |
2086 | reg->hr_item_dropped = 1; | ||
2014 | spin_unlock(&o2hb_live_lock); | 2087 | spin_unlock(&o2hb_live_lock); |
2015 | 2088 | ||
2016 | if (hb_task) | 2089 | if (hb_task) |
@@ -2028,7 +2101,27 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, | |||
2028 | if (o2hb_global_heartbeat_active()) | 2101 | if (o2hb_global_heartbeat_active()) |
2029 | printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", | 2102 | printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", |
2030 | config_item_name(®->hr_item)); | 2103 | config_item_name(®->hr_item)); |
2104 | |||
2031 | config_item_put(item); | 2105 | config_item_put(item); |
2106 | |||
2107 | if (!o2hb_global_heartbeat_active() || !quorum_region) | ||
2108 | return; | ||
2109 | |||
2110 | /* | ||
2111 | * If global heartbeat active and there are dependent users, | ||
2112 | * pin all regions if quorum region count <= CUT_OFF | ||
2113 | */ | ||
2114 | spin_lock(&o2hb_live_lock); | ||
2115 | |||
2116 | if (!o2hb_dependent_users) | ||
2117 | goto unlock; | ||
2118 | |||
2119 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | ||
2120 | O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) | ||
2121 | o2hb_region_pin(NULL); | ||
2122 | |||
2123 | unlock: | ||
2124 | spin_unlock(&o2hb_live_lock); | ||
2032 | } | 2125 | } |
2033 | 2126 | ||
2034 | struct o2hb_heartbeat_group_attribute { | 2127 | struct o2hb_heartbeat_group_attribute { |
@@ -2214,63 +2307,138 @@ void o2hb_setup_callback(struct o2hb_callback_func *hc, | |||
2214 | } | 2307 | } |
2215 | EXPORT_SYMBOL_GPL(o2hb_setup_callback); | 2308 | EXPORT_SYMBOL_GPL(o2hb_setup_callback); |
2216 | 2309 | ||
2217 | static struct o2hb_region *o2hb_find_region(const char *region_uuid) | 2310 | /* |
2311 | * In local heartbeat mode, region_uuid passed matches the dlm domain name. | ||
2312 | * In global heartbeat mode, region_uuid passed is NULL. | ||
2313 | * | ||
2314 | * In local, we only pin the matching region. In global we pin all the active | ||
2315 | * regions. | ||
2316 | */ | ||
2317 | static int o2hb_region_pin(const char *region_uuid) | ||
2218 | { | 2318 | { |
2219 | struct o2hb_region *p, *reg = NULL; | 2319 | int ret = 0, found = 0; |
2320 | struct o2hb_region *reg; | ||
2321 | char *uuid; | ||
2220 | 2322 | ||
2221 | assert_spin_locked(&o2hb_live_lock); | 2323 | assert_spin_locked(&o2hb_live_lock); |
2222 | 2324 | ||
2223 | list_for_each_entry(p, &o2hb_all_regions, hr_all_item) { | 2325 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2224 | if (!strcmp(region_uuid, config_item_name(&p->hr_item))) { | 2326 | uuid = config_item_name(®->hr_item); |
2225 | reg = p; | 2327 | |
2226 | break; | 2328 | /* local heartbeat */ |
2329 | if (region_uuid) { | ||
2330 | if (strcmp(region_uuid, uuid)) | ||
2331 | continue; | ||
2332 | found = 1; | ||
2333 | } | ||
2334 | |||
2335 | if (reg->hr_item_pinned || reg->hr_item_dropped) | ||
2336 | goto skip_pin; | ||
2337 | |||
2338 | /* Ignore ENOENT only for local hb (userdlm domain) */ | ||
2339 | ret = o2nm_depend_item(®->hr_item); | ||
2340 | if (!ret) { | ||
2341 | mlog(ML_CLUSTER, "Pin region %s\n", uuid); | ||
2342 | reg->hr_item_pinned = 1; | ||
2343 | } else { | ||
2344 | if (ret == -ENOENT && found) | ||
2345 | ret = 0; | ||
2346 | else { | ||
2347 | mlog(ML_ERROR, "Pin region %s fails with %d\n", | ||
2348 | uuid, ret); | ||
2349 | break; | ||
2350 | } | ||
2227 | } | 2351 | } |
2352 | skip_pin: | ||
2353 | if (found) | ||
2354 | break; | ||
2228 | } | 2355 | } |
2229 | 2356 | ||
2230 | return reg; | 2357 | return ret; |
2231 | } | 2358 | } |
2232 | 2359 | ||
2233 | static int o2hb_region_get(const char *region_uuid) | 2360 | /* |
2361 | * In local heartbeat mode, region_uuid passed matches the dlm domain name. | ||
2362 | * In global heartbeat mode, region_uuid passed is NULL. | ||
2363 | * | ||
2364 | * In local, we only unpin the matching region. In global we unpin all the | ||
2365 | * active regions. | ||
2366 | */ | ||
2367 | static void o2hb_region_unpin(const char *region_uuid) | ||
2234 | { | 2368 | { |
2235 | int ret = 0; | ||
2236 | struct o2hb_region *reg; | 2369 | struct o2hb_region *reg; |
2370 | char *uuid; | ||
2371 | int found = 0; | ||
2237 | 2372 | ||
2238 | spin_lock(&o2hb_live_lock); | 2373 | assert_spin_locked(&o2hb_live_lock); |
2239 | 2374 | ||
2240 | reg = o2hb_find_region(region_uuid); | 2375 | list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { |
2241 | if (!reg) | 2376 | uuid = config_item_name(®->hr_item); |
2242 | ret = -ENOENT; | 2377 | if (region_uuid) { |
2243 | spin_unlock(&o2hb_live_lock); | 2378 | if (strcmp(region_uuid, uuid)) |
2379 | continue; | ||
2380 | found = 1; | ||
2381 | } | ||
2244 | 2382 | ||
2245 | if (ret) | 2383 | if (reg->hr_item_pinned) { |
2246 | goto out; | 2384 | mlog(ML_CLUSTER, "Unpin region %s\n", uuid); |
2385 | o2nm_undepend_item(®->hr_item); | ||
2386 | reg->hr_item_pinned = 0; | ||
2387 | } | ||
2388 | if (found) | ||
2389 | break; | ||
2390 | } | ||
2391 | } | ||
2247 | 2392 | ||
2248 | ret = o2nm_depend_this_node(); | 2393 | static int o2hb_region_inc_user(const char *region_uuid) |
2249 | if (ret) | 2394 | { |
2250 | goto out; | 2395 | int ret = 0; |
2251 | 2396 | ||
2252 | ret = o2nm_depend_item(®->hr_item); | 2397 | spin_lock(&o2hb_live_lock); |
2253 | if (ret) | ||
2254 | o2nm_undepend_this_node(); | ||
2255 | 2398 | ||
2256 | out: | 2399 | /* local heartbeat */ |
2400 | if (!o2hb_global_heartbeat_active()) { | ||
2401 | ret = o2hb_region_pin(region_uuid); | ||
2402 | goto unlock; | ||
2403 | } | ||
2404 | |||
2405 | /* | ||
2406 | * if global heartbeat active and this is the first dependent user, | ||
2407 | * pin all regions if quorum region count <= CUT_OFF | ||
2408 | */ | ||
2409 | o2hb_dependent_users++; | ||
2410 | if (o2hb_dependent_users > 1) | ||
2411 | goto unlock; | ||
2412 | |||
2413 | if (o2hb_pop_count(&o2hb_quorum_region_bitmap, | ||
2414 | O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) | ||
2415 | ret = o2hb_region_pin(NULL); | ||
2416 | |||
2417 | unlock: | ||
2418 | spin_unlock(&o2hb_live_lock); | ||
2257 | return ret; | 2419 | return ret; |
2258 | } | 2420 | } |
2259 | 2421 | ||
2260 | static void o2hb_region_put(const char *region_uuid) | 2422 | void o2hb_region_dec_user(const char *region_uuid) |
2261 | { | 2423 | { |
2262 | struct o2hb_region *reg; | ||
2263 | |||
2264 | spin_lock(&o2hb_live_lock); | 2424 | spin_lock(&o2hb_live_lock); |
2265 | 2425 | ||
2266 | reg = o2hb_find_region(region_uuid); | 2426 | /* local heartbeat */ |
2427 | if (!o2hb_global_heartbeat_active()) { | ||
2428 | o2hb_region_unpin(region_uuid); | ||
2429 | goto unlock; | ||
2430 | } | ||
2267 | 2431 | ||
2268 | spin_unlock(&o2hb_live_lock); | 2432 | /* |
2433 | * if global heartbeat active and there are no dependent users, | ||
2434 | * unpin all quorum regions | ||
2435 | */ | ||
2436 | o2hb_dependent_users--; | ||
2437 | if (!o2hb_dependent_users) | ||
2438 | o2hb_region_unpin(NULL); | ||
2269 | 2439 | ||
2270 | if (reg) { | 2440 | unlock: |
2271 | o2nm_undepend_item(®->hr_item); | 2441 | spin_unlock(&o2hb_live_lock); |
2272 | o2nm_undepend_this_node(); | ||
2273 | } | ||
2274 | } | 2442 | } |
2275 | 2443 | ||
2276 | int o2hb_register_callback(const char *region_uuid, | 2444 | int o2hb_register_callback(const char *region_uuid, |
@@ -2291,9 +2459,11 @@ int o2hb_register_callback(const char *region_uuid, | |||
2291 | } | 2459 | } |
2292 | 2460 | ||
2293 | if (region_uuid) { | 2461 | if (region_uuid) { |
2294 | ret = o2hb_region_get(region_uuid); | 2462 | ret = o2hb_region_inc_user(region_uuid); |
2295 | if (ret) | 2463 | if (ret) { |
2464 | mlog_errno(ret); | ||
2296 | goto out; | 2465 | goto out; |
2466 | } | ||
2297 | } | 2467 | } |
2298 | 2468 | ||
2299 | down_write(&o2hb_callback_sem); | 2469 | down_write(&o2hb_callback_sem); |
@@ -2311,7 +2481,7 @@ int o2hb_register_callback(const char *region_uuid, | |||
2311 | up_write(&o2hb_callback_sem); | 2481 | up_write(&o2hb_callback_sem); |
2312 | ret = 0; | 2482 | ret = 0; |
2313 | out: | 2483 | out: |
2314 | mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n", | 2484 | mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n", |
2315 | ret, __builtin_return_address(0), hc); | 2485 | ret, __builtin_return_address(0), hc); |
2316 | return ret; | 2486 | return ret; |
2317 | } | 2487 | } |
@@ -2322,7 +2492,7 @@ void o2hb_unregister_callback(const char *region_uuid, | |||
2322 | { | 2492 | { |
2323 | BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); | 2493 | BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); |
2324 | 2494 | ||
2325 | mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n", | 2495 | mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n", |
2326 | __builtin_return_address(0), hc); | 2496 | __builtin_return_address(0), hc); |
2327 | 2497 | ||
2328 | /* XXX Can this happen _with_ a region reference? */ | 2498 | /* XXX Can this happen _with_ a region reference? */ |
@@ -2330,7 +2500,7 @@ void o2hb_unregister_callback(const char *region_uuid, | |||
2330 | return; | 2500 | return; |
2331 | 2501 | ||
2332 | if (region_uuid) | 2502 | if (region_uuid) |
2333 | o2hb_region_put(region_uuid); | 2503 | o2hb_region_dec_user(region_uuid); |
2334 | 2504 | ||
2335 | down_write(&o2hb_callback_sem); | 2505 | down_write(&o2hb_callback_sem); |
2336 | 2506 | ||
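
The heartbeat.c changes replace the old per-region get/put with a dependent-user count: in global heartbeat mode the first o2hb_register_callback() pins every quorum region (as long as there are at most O2HB_PIN_CUT_OFF of them) and the last unregister unpins them all, while local heartbeat still pins only the matching region. A reduced sketch of the global-mode counting; locking, the region walk and the configfs pinning (o2nm_depend_item()) are elided, and quorum_region_count(), pin_all_regions() and unpin_all_regions() are hypothetical stand-ins:

```c
#define PIN_CUT_OFF	3	/* plays the role of O2HB_PIN_CUT_OFF */

/* hypothetical helpers standing in for the o2hb_all_regions walk */
static unsigned int quorum_region_count(void);
static int pin_all_regions(void);
static void unpin_all_regions(void);

static unsigned int dependent_users;

/* global-heartbeat half of o2hb_region_inc_user() */
static int hb_inc_user(void)
{
	int ret = 0;

	/* only the first dependent user pins, and only while few regions exist */
	if (++dependent_users == 1 && quorum_region_count() <= PIN_CUT_OFF)
		ret = pin_all_regions();
	return ret;
}

/* global-heartbeat half of o2hb_region_dec_user() */
static void hb_dec_user(void)
{
	/* last dependent user going away unpins everything */
	if (--dependent_users == 0)
		unpin_all_regions();
}
```

The real code also re-evaluates the pin state when regions come and go, as the o2hb_set_quorum_device() and heartbeat_group_drop_item() hunks above show.
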
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index a3f150e52b02..3a5835904b3d 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c | |||
@@ -46,10 +46,15 @@ | |||
46 | #define O2NET_DEBUG_DIR "o2net" | 46 | #define O2NET_DEBUG_DIR "o2net" |
47 | #define SC_DEBUG_NAME "sock_containers" | 47 | #define SC_DEBUG_NAME "sock_containers" |
48 | #define NST_DEBUG_NAME "send_tracking" | 48 | #define NST_DEBUG_NAME "send_tracking" |
49 | #define STATS_DEBUG_NAME "stats" | ||
50 | |||
51 | #define SHOW_SOCK_CONTAINERS 0 | ||
52 | #define SHOW_SOCK_STATS 1 | ||
49 | 53 | ||
50 | static struct dentry *o2net_dentry; | 54 | static struct dentry *o2net_dentry; |
51 | static struct dentry *sc_dentry; | 55 | static struct dentry *sc_dentry; |
52 | static struct dentry *nst_dentry; | 56 | static struct dentry *nst_dentry; |
57 | static struct dentry *stats_dentry; | ||
53 | 58 | ||
54 | static DEFINE_SPINLOCK(o2net_debug_lock); | 59 | static DEFINE_SPINLOCK(o2net_debug_lock); |
55 | 60 | ||
@@ -123,37 +128,42 @@ static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
123 | static int nst_seq_show(struct seq_file *seq, void *v) | 128 | static int nst_seq_show(struct seq_file *seq, void *v) |
124 | { | 129 | { |
125 | struct o2net_send_tracking *nst, *dummy_nst = seq->private; | 130 | struct o2net_send_tracking *nst, *dummy_nst = seq->private; |
131 | ktime_t now; | ||
132 | s64 sock, send, status; | ||
126 | 133 | ||
127 | spin_lock(&o2net_debug_lock); | 134 | spin_lock(&o2net_debug_lock); |
128 | nst = next_nst(dummy_nst); | 135 | nst = next_nst(dummy_nst); |
136 | if (!nst) | ||
137 | goto out; | ||
129 | 138 | ||
130 | if (nst != NULL) { | 139 | now = ktime_get(); |
131 | /* get_task_comm isn't exported. oh well. */ | 140 | sock = ktime_to_us(ktime_sub(now, nst->st_sock_time)); |
132 | seq_printf(seq, "%p:\n" | 141 | send = ktime_to_us(ktime_sub(now, nst->st_send_time)); |
133 | " pid: %lu\n" | 142 | status = ktime_to_us(ktime_sub(now, nst->st_status_time)); |
134 | " tgid: %lu\n" | 143 | |
135 | " process name: %s\n" | 144 | /* get_task_comm isn't exported. oh well. */ |
136 | " node: %u\n" | 145 | seq_printf(seq, "%p:\n" |
137 | " sc: %p\n" | 146 | " pid: %lu\n" |
138 | " message id: %d\n" | 147 | " tgid: %lu\n" |
139 | " message type: %u\n" | 148 | " process name: %s\n" |
140 | " message key: 0x%08x\n" | 149 | " node: %u\n" |
141 | " sock acquiry: %lu.%ld\n" | 150 | " sc: %p\n" |
142 | " send start: %lu.%ld\n" | 151 | " message id: %d\n" |
143 | " wait start: %lu.%ld\n", | 152 | " message type: %u\n" |
144 | nst, (unsigned long)nst->st_task->pid, | 153 | " message key: 0x%08x\n" |
145 | (unsigned long)nst->st_task->tgid, | 154 | " sock acquiry: %lld usecs ago\n" |
146 | nst->st_task->comm, nst->st_node, | 155 | " send start: %lld usecs ago\n" |
147 | nst->st_sc, nst->st_id, nst->st_msg_type, | 156 | " wait start: %lld usecs ago\n", |
148 | nst->st_msg_key, | 157 | nst, (unsigned long)task_pid_nr(nst->st_task), |
149 | nst->st_sock_time.tv_sec, | 158 | (unsigned long)nst->st_task->tgid, |
150 | (long)nst->st_sock_time.tv_usec, | 159 | nst->st_task->comm, nst->st_node, |
151 | nst->st_send_time.tv_sec, | 160 | nst->st_sc, nst->st_id, nst->st_msg_type, |
152 | (long)nst->st_send_time.tv_usec, | 161 | nst->st_msg_key, |
153 | nst->st_status_time.tv_sec, | 162 | (long long)sock, |
154 | (long)nst->st_status_time.tv_usec); | 163 | (long long)send, |
155 | } | 164 | (long long)status); |
156 | 165 | ||
166 | out: | ||
157 | spin_unlock(&o2net_debug_lock); | 167 | spin_unlock(&o2net_debug_lock); |
158 | 168 | ||
159 | return 0; | 169 | return 0; |
@@ -228,6 +238,11 @@ void o2net_debug_del_sc(struct o2net_sock_container *sc) | |||
228 | spin_unlock(&o2net_debug_lock); | 238 | spin_unlock(&o2net_debug_lock); |
229 | } | 239 | } |
230 | 240 | ||
241 | struct o2net_sock_debug { | ||
242 | int dbg_ctxt; | ||
243 | struct o2net_sock_container *dbg_sock; | ||
244 | }; | ||
245 | |||
231 | static struct o2net_sock_container | 246 | static struct o2net_sock_container |
232 | *next_sc(struct o2net_sock_container *sc_start) | 247 | *next_sc(struct o2net_sock_container *sc_start) |
233 | { | 248 | { |
@@ -253,7 +268,8 @@ static struct o2net_sock_container | |||
253 | 268 | ||
254 | static void *sc_seq_start(struct seq_file *seq, loff_t *pos) | 269 | static void *sc_seq_start(struct seq_file *seq, loff_t *pos) |
255 | { | 270 | { |
256 | struct o2net_sock_container *sc, *dummy_sc = seq->private; | 271 | struct o2net_sock_debug *sd = seq->private; |
272 | struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; | ||
257 | 273 | ||
258 | spin_lock(&o2net_debug_lock); | 274 | spin_lock(&o2net_debug_lock); |
259 | sc = next_sc(dummy_sc); | 275 | sc = next_sc(dummy_sc); |
@@ -264,7 +280,8 @@ static void *sc_seq_start(struct seq_file *seq, loff_t *pos) | |||
264 | 280 | ||
265 | static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | 281 | static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
266 | { | 282 | { |
267 | struct o2net_sock_container *sc, *dummy_sc = seq->private; | 283 | struct o2net_sock_debug *sd = seq->private; |
284 | struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; | ||
268 | 285 | ||
269 | spin_lock(&o2net_debug_lock); | 286 | spin_lock(&o2net_debug_lock); |
270 | sc = next_sc(dummy_sc); | 287 | sc = next_sc(dummy_sc); |
@@ -276,65 +293,107 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
276 | return sc; /* unused, just needs to be null when done */ | 293 | return sc; /* unused, just needs to be null when done */ |
277 | } | 294 | } |
278 | 295 | ||
279 | #define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec | 296 | #ifdef CONFIG_OCFS2_FS_STATS |
297 | # define sc_send_count(_s) ((_s)->sc_send_count) | ||
298 | # define sc_recv_count(_s) ((_s)->sc_recv_count) | ||
299 | # define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total)) | ||
300 | # define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total)) | ||
301 | # define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total)) | ||
302 | # define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total)) | ||
303 | #else | ||
304 | # define sc_send_count(_s) (0U) | ||
305 | # define sc_recv_count(_s) (0U) | ||
306 | # define sc_tv_acquiry_total_ns(_s) (0LL) | ||
307 | # define sc_tv_send_total_ns(_s) (0LL) | ||
308 | # define sc_tv_status_total_ns(_s) (0LL) | ||
309 | # define sc_tv_process_total_ns(_s) (0LL) | ||
310 | #endif | ||
311 | |||
312 | /* So that debugfs.ocfs2 can determine which format is being used */ | ||
313 | #define O2NET_STATS_STR_VERSION 1 | ||
314 | static void sc_show_sock_stats(struct seq_file *seq, | ||
315 | struct o2net_sock_container *sc) | ||
316 | { | ||
317 | if (!sc) | ||
318 | return; | ||
319 | |||
320 | seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION, | ||
321 | sc->sc_node->nd_num, (unsigned long)sc_send_count(sc), | ||
322 | (long long)sc_tv_acquiry_total_ns(sc), | ||
323 | (long long)sc_tv_send_total_ns(sc), | ||
324 | (long long)sc_tv_status_total_ns(sc), | ||
325 | (unsigned long)sc_recv_count(sc), | ||
326 | (long long)sc_tv_process_total_ns(sc)); | ||
327 | } | ||
328 | |||
329 | static void sc_show_sock_container(struct seq_file *seq, | ||
330 | struct o2net_sock_container *sc) | ||
331 | { | ||
332 | struct inet_sock *inet = NULL; | ||
333 | __be32 saddr = 0, daddr = 0; | ||
334 | __be16 sport = 0, dport = 0; | ||
335 | |||
336 | if (!sc) | ||
337 | return; | ||
338 | |||
339 | if (sc->sc_sock) { | ||
340 | inet = inet_sk(sc->sc_sock->sk); | ||
341 | /* the stack's structs aren't sparse endian clean */ | ||
342 | saddr = (__force __be32)inet->inet_saddr; | ||
343 | daddr = (__force __be32)inet->inet_daddr; | ||
344 | sport = (__force __be16)inet->inet_sport; | ||
345 | dport = (__force __be16)inet->inet_dport; | ||
346 | } | ||
347 | |||
348 | /* XXX sigh, inet-> doesn't have sparse annotation so any | ||
349 | * use of it here generates a warning with -Wbitwise */ | ||
350 | seq_printf(seq, "%p:\n" | ||
351 | " krefs: %d\n" | ||
352 | " sock: %pI4:%u -> " | ||
353 | "%pI4:%u\n" | ||
354 | " remote node: %s\n" | ||
355 | " page off: %zu\n" | ||
356 | " handshake ok: %u\n" | ||
357 | " timer: %lld usecs\n" | ||
358 | " data ready: %lld usecs\n" | ||
359 | " advance start: %lld usecs\n" | ||
360 | " advance stop: %lld usecs\n" | ||
361 | " func start: %lld usecs\n" | ||
362 | " func stop: %lld usecs\n" | ||
363 | " func key: 0x%08x\n" | ||
364 | " func type: %u\n", | ||
365 | sc, | ||
366 | atomic_read(&sc->sc_kref.refcount), | ||
367 | &saddr, inet ? ntohs(sport) : 0, | ||
368 | &daddr, inet ? ntohs(dport) : 0, | ||
369 | sc->sc_node->nd_name, | ||
370 | sc->sc_page_off, | ||
371 | sc->sc_handshake_ok, | ||
372 | (long long)ktime_to_us(sc->sc_tv_timer), | ||
373 | (long long)ktime_to_us(sc->sc_tv_data_ready), | ||
374 | (long long)ktime_to_us(sc->sc_tv_advance_start), | ||
375 | (long long)ktime_to_us(sc->sc_tv_advance_stop), | ||
376 | (long long)ktime_to_us(sc->sc_tv_func_start), | ||
377 | (long long)ktime_to_us(sc->sc_tv_func_stop), | ||
378 | sc->sc_msg_key, | ||
379 | sc->sc_msg_type); | ||
380 | } | ||
280 | 381 | ||
281 | static int sc_seq_show(struct seq_file *seq, void *v) | 382 | static int sc_seq_show(struct seq_file *seq, void *v) |
282 | { | 383 | { |
283 | struct o2net_sock_container *sc, *dummy_sc = seq->private; | 384 | struct o2net_sock_debug *sd = seq->private; |
385 | struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; | ||
284 | 386 | ||
285 | spin_lock(&o2net_debug_lock); | 387 | spin_lock(&o2net_debug_lock); |
286 | sc = next_sc(dummy_sc); | 388 | sc = next_sc(dummy_sc); |
287 | 389 | ||
288 | if (sc != NULL) { | 390 | if (sc) { |
289 | struct inet_sock *inet = NULL; | 391 | if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS) |
290 | 392 | sc_show_sock_container(seq, sc); | |
291 | __be32 saddr = 0, daddr = 0; | 393 | else |
292 | __be16 sport = 0, dport = 0; | 394 | sc_show_sock_stats(seq, sc); |
293 | |||
294 | if (sc->sc_sock) { | ||
295 | inet = inet_sk(sc->sc_sock->sk); | ||
296 | /* the stack's structs aren't sparse endian clean */ | ||
297 | saddr = (__force __be32)inet->inet_saddr; | ||
298 | daddr = (__force __be32)inet->inet_daddr; | ||
299 | sport = (__force __be16)inet->inet_sport; | ||
300 | dport = (__force __be16)inet->inet_dport; | ||
301 | } | ||
302 | |||
303 | /* XXX sigh, inet-> doesn't have sparse annotation so any | ||
304 | * use of it here generates a warning with -Wbitwise */ | ||
305 | seq_printf(seq, "%p:\n" | ||
306 | " krefs: %d\n" | ||
307 | " sock: %pI4:%u -> " | ||
308 | "%pI4:%u\n" | ||
309 | " remote node: %s\n" | ||
310 | " page off: %zu\n" | ||
311 | " handshake ok: %u\n" | ||
312 | " timer: %lu.%ld\n" | ||
313 | " data ready: %lu.%ld\n" | ||
314 | " advance start: %lu.%ld\n" | ||
315 | " advance stop: %lu.%ld\n" | ||
316 | " func start: %lu.%ld\n" | ||
317 | " func stop: %lu.%ld\n" | ||
318 | " func key: %u\n" | ||
319 | " func type: %u\n", | ||
320 | sc, | ||
321 | atomic_read(&sc->sc_kref.refcount), | ||
322 | &saddr, inet ? ntohs(sport) : 0, | ||
323 | &daddr, inet ? ntohs(dport) : 0, | ||
324 | sc->sc_node->nd_name, | ||
325 | sc->sc_page_off, | ||
326 | sc->sc_handshake_ok, | ||
327 | TV_SEC_USEC(sc->sc_tv_timer), | ||
328 | TV_SEC_USEC(sc->sc_tv_data_ready), | ||
329 | TV_SEC_USEC(sc->sc_tv_advance_start), | ||
330 | TV_SEC_USEC(sc->sc_tv_advance_stop), | ||
331 | TV_SEC_USEC(sc->sc_tv_func_start), | ||
332 | TV_SEC_USEC(sc->sc_tv_func_stop), | ||
333 | sc->sc_msg_key, | ||
334 | sc->sc_msg_type); | ||
335 | } | 395 | } |
336 | 396 | ||
337 | |||
338 | spin_unlock(&o2net_debug_lock); | 397 | spin_unlock(&o2net_debug_lock); |
339 | 398 | ||
340 | return 0; | 399 | return 0; |
@@ -351,7 +410,7 @@ static const struct seq_operations sc_seq_ops = { | |||
351 | .show = sc_seq_show, | 410 | .show = sc_seq_show, |
352 | }; | 411 | }; |
353 | 412 | ||
354 | static int sc_fop_open(struct inode *inode, struct file *file) | 413 | static int sc_common_open(struct file *file, struct o2net_sock_debug *sd) |
355 | { | 414 | { |
356 | struct o2net_sock_container *dummy_sc; | 415 | struct o2net_sock_container *dummy_sc; |
357 | struct seq_file *seq; | 416 | struct seq_file *seq; |
@@ -369,7 +428,8 @@ static int sc_fop_open(struct inode *inode, struct file *file) | |||
369 | goto out; | 428 | goto out; |
370 | 429 | ||
371 | seq = file->private_data; | 430 | seq = file->private_data; |
372 | seq->private = dummy_sc; | 431 | seq->private = sd; |
432 | sd->dbg_sock = dummy_sc; | ||
373 | o2net_debug_add_sc(dummy_sc); | 433 | o2net_debug_add_sc(dummy_sc); |
374 | 434 | ||
375 | dummy_sc = NULL; | 435 | dummy_sc = NULL; |
@@ -382,12 +442,48 @@ out: | |||
382 | static int sc_fop_release(struct inode *inode, struct file *file) | 442 | static int sc_fop_release(struct inode *inode, struct file *file) |
383 | { | 443 | { |
384 | struct seq_file *seq = file->private_data; | 444 | struct seq_file *seq = file->private_data; |
385 | struct o2net_sock_container *dummy_sc = seq->private; | 445 | struct o2net_sock_debug *sd = seq->private; |
446 | struct o2net_sock_container *dummy_sc = sd->dbg_sock; | ||
386 | 447 | ||
387 | o2net_debug_del_sc(dummy_sc); | 448 | o2net_debug_del_sc(dummy_sc); |
388 | return seq_release_private(inode, file); | 449 | return seq_release_private(inode, file); |
389 | } | 450 | } |
390 | 451 | ||
452 | static int stats_fop_open(struct inode *inode, struct file *file) | ||
453 | { | ||
454 | struct o2net_sock_debug *sd; | ||
455 | |||
456 | sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); | ||
457 | if (sd == NULL) | ||
458 | return -ENOMEM; | ||
459 | |||
460 | sd->dbg_ctxt = SHOW_SOCK_STATS; | ||
461 | sd->dbg_sock = NULL; | ||
462 | |||
463 | return sc_common_open(file, sd); | ||
464 | } | ||
465 | |||
466 | static const struct file_operations stats_seq_fops = { | ||
467 | .open = stats_fop_open, | ||
468 | .read = seq_read, | ||
469 | .llseek = seq_lseek, | ||
470 | .release = sc_fop_release, | ||
471 | }; | ||
472 | |||
473 | static int sc_fop_open(struct inode *inode, struct file *file) | ||
474 | { | ||
475 | struct o2net_sock_debug *sd; | ||
476 | |||
477 | sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); | ||
478 | if (sd == NULL) | ||
479 | return -ENOMEM; | ||
480 | |||
481 | sd->dbg_ctxt = SHOW_SOCK_CONTAINERS; | ||
482 | sd->dbg_sock = NULL; | ||
483 | |||
484 | return sc_common_open(file, sd); | ||
485 | } | ||
486 | |||
391 | static const struct file_operations sc_seq_fops = { | 487 | static const struct file_operations sc_seq_fops = { |
392 | .open = sc_fop_open, | 488 | .open = sc_fop_open, |
393 | .read = seq_read, | 489 | .read = seq_read, |
@@ -419,25 +515,29 @@ int o2net_debugfs_init(void) | |||
419 | goto bail; | 515 | goto bail; |
420 | } | 516 | } |
421 | 517 | ||
518 | stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, | ||
519 | o2net_dentry, NULL, | ||
520 | &stats_seq_fops); | ||
521 | if (!stats_dentry) { | ||
522 | mlog_errno(-ENOMEM); | ||
523 | goto bail; | ||
524 | } | ||
525 | |||
422 | return 0; | 526 | return 0; |
423 | bail: | 527 | bail: |
424 | if (sc_dentry) | 528 | debugfs_remove(stats_dentry); |
425 | debugfs_remove(sc_dentry); | 529 | debugfs_remove(sc_dentry); |
426 | if (nst_dentry) | 530 | debugfs_remove(nst_dentry); |
427 | debugfs_remove(nst_dentry); | 531 | debugfs_remove(o2net_dentry); |
428 | if (o2net_dentry) | ||
429 | debugfs_remove(o2net_dentry); | ||
430 | return -ENOMEM; | 532 | return -ENOMEM; |
431 | } | 533 | } |
432 | 534 | ||
433 | void o2net_debugfs_exit(void) | 535 | void o2net_debugfs_exit(void) |
434 | { | 536 | { |
435 | if (sc_dentry) | 537 | debugfs_remove(stats_dentry); |
436 | debugfs_remove(sc_dentry); | 538 | debugfs_remove(sc_dentry); |
437 | if (nst_dentry) | 539 | debugfs_remove(nst_dentry); |
438 | debugfs_remove(nst_dentry); | 540 | debugfs_remove(o2net_dentry); |
439 | if (o2net_dentry) | ||
440 | debugfs_remove(o2net_dentry); | ||
441 | } | 541 | } |
442 | 542 | ||
443 | #endif /* CONFIG_DEBUG_FS */ | 543 | #endif /* CONFIG_DEBUG_FS */ |
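
The new per-socket "stats" debugfs file prints one comma-separated record per socket container, prefixed with O2NET_STATS_STR_VERSION so tools such as debugfs.ocfs2 can detect format changes. A hedged sketch of how a userspace reader might consume one record of format version 1; the parser is not part of the patch, only the field order follows the seq_printf in sc_show_sock_stats():

```c
#include <stdio.h>

/* Parse one line of the o2net "stats" debugfs file (format version 1). */
static int parse_o2net_stats(const char *line)
{
	int version;
	unsigned int node;
	unsigned long sends, recvs;
	long long acquiry_ns, send_ns, status_ns, process_ns;

	if (sscanf(line, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld",
		   &version, &node, &sends, &acquiry_ns, &send_ns,
		   &status_ns, &recvs, &process_ns) != 8)
		return -1;
	if (version != 1)
		return -1;	/* unknown format version */

	printf("node %u: %lu sends (%lld ns sending), %lu recvs (%lld ns processing)\n",
	       node, sends, send_ns, recvs, process_ns);
	return 0;
}
```
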
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 9aa426e42123..3b11cb1e38fc 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -153,63 +153,114 @@ static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, | |||
153 | nst->st_node = node; | 153 | nst->st_node = node; |
154 | } | 154 | } |
155 | 155 | ||
156 | static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) | 156 | static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) |
157 | { | 157 | { |
158 | do_gettimeofday(&nst->st_sock_time); | 158 | nst->st_sock_time = ktime_get(); |
159 | } | 159 | } |
160 | 160 | ||
161 | static void o2net_set_nst_send_time(struct o2net_send_tracking *nst) | 161 | static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) |
162 | { | 162 | { |
163 | do_gettimeofday(&nst->st_send_time); | 163 | nst->st_send_time = ktime_get(); |
164 | } | 164 | } |
165 | 165 | ||
166 | static void o2net_set_nst_status_time(struct o2net_send_tracking *nst) | 166 | static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) |
167 | { | 167 | { |
168 | do_gettimeofday(&nst->st_status_time); | 168 | nst->st_status_time = ktime_get(); |
169 | } | 169 | } |
170 | 170 | ||
171 | static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, | 171 | static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, |
172 | struct o2net_sock_container *sc) | 172 | struct o2net_sock_container *sc) |
173 | { | 173 | { |
174 | nst->st_sc = sc; | 174 | nst->st_sc = sc; |
175 | } | 175 | } |
176 | 176 | ||
177 | static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id) | 177 | static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, |
178 | u32 msg_id) | ||
178 | { | 179 | { |
179 | nst->st_id = msg_id; | 180 | nst->st_id = msg_id; |
180 | } | 181 | } |
181 | 182 | ||
182 | #else /* CONFIG_DEBUG_FS */ | 183 | static inline void o2net_set_sock_timer(struct o2net_sock_container *sc) |
183 | |||
184 | static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, | ||
185 | u32 msgkey, struct task_struct *task, u8 node) | ||
186 | { | 184 | { |
185 | sc->sc_tv_timer = ktime_get(); | ||
187 | } | 186 | } |
188 | 187 | ||
189 | static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) | 188 | static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc) |
190 | { | 189 | { |
190 | sc->sc_tv_data_ready = ktime_get(); | ||
191 | } | 191 | } |
192 | 192 | ||
193 | static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) | 193 | static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc) |
194 | { | 194 | { |
195 | sc->sc_tv_advance_start = ktime_get(); | ||
195 | } | 196 | } |
196 | 197 | ||
197 | static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) | 198 | static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc) |
198 | { | 199 | { |
200 | sc->sc_tv_advance_stop = ktime_get(); | ||
199 | } | 201 | } |
200 | 202 | ||
201 | static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, | 203 | static inline void o2net_set_func_start_time(struct o2net_sock_container *sc) |
202 | struct o2net_sock_container *sc) | ||
203 | { | 204 | { |
205 | sc->sc_tv_func_start = ktime_get(); | ||
204 | } | 206 | } |
205 | 207 | ||
206 | static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, | 208 | static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) |
207 | u32 msg_id) | ||
208 | { | 209 | { |
210 | sc->sc_tv_func_stop = ktime_get(); | ||
209 | } | 211 | } |
210 | 212 | ||
213 | static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) | ||
214 | { | ||
215 | return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); | ||
216 | } | ||
217 | #else /* CONFIG_DEBUG_FS */ | ||
218 | # define o2net_init_nst(a, b, c, d, e) | ||
219 | # define o2net_set_nst_sock_time(a) | ||
220 | # define o2net_set_nst_send_time(a) | ||
221 | # define o2net_set_nst_status_time(a) | ||
222 | # define o2net_set_nst_sock_container(a, b) | ||
223 | # define o2net_set_nst_msg_id(a, b) | ||
224 | # define o2net_set_sock_timer(a) | ||
225 | # define o2net_set_data_ready_time(a) | ||
226 | # define o2net_set_advance_start_time(a) | ||
227 | # define o2net_set_advance_stop_time(a) | ||
228 | # define o2net_set_func_start_time(a) | ||
229 | # define o2net_set_func_stop_time(a) | ||
230 | # define o2net_get_func_run_time(a) (ktime_t)0 | ||
211 | #endif /* CONFIG_DEBUG_FS */ | 231 | #endif /* CONFIG_DEBUG_FS */ |
212 | 232 | ||
233 | #ifdef CONFIG_OCFS2_FS_STATS | ||
234 | static void o2net_update_send_stats(struct o2net_send_tracking *nst, | ||
235 | struct o2net_sock_container *sc) | ||
236 | { | ||
237 | sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total, | ||
238 | ktime_sub(ktime_get(), | ||
239 | nst->st_status_time)); | ||
240 | sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total, | ||
241 | ktime_sub(nst->st_status_time, | ||
242 | nst->st_send_time)); | ||
243 | sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total, | ||
244 | ktime_sub(nst->st_send_time, | ||
245 | nst->st_sock_time)); | ||
246 | sc->sc_send_count++; | ||
247 | } | ||
248 | |||
249 | static void o2net_update_recv_stats(struct o2net_sock_container *sc) | ||
250 | { | ||
251 | sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total, | ||
252 | o2net_get_func_run_time(sc)); | ||
253 | sc->sc_recv_count++; | ||
254 | } | ||
255 | |||
256 | #else | ||
257 | |||
258 | # define o2net_update_send_stats(a, b) | ||
259 | |||
260 | # define o2net_update_recv_stats(sc) | ||
261 | |||
262 | #endif /* CONFIG_OCFS2_FS_STATS */ | ||
263 | |||
213 | static inline int o2net_reconnect_delay(void) | 264 | static inline int o2net_reconnect_delay(void) |
214 | { | 265 | { |
215 | return o2nm_single_cluster->cl_reconnect_delay_ms; | 266 | return o2nm_single_cluster->cl_reconnect_delay_ms; |
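
With the timeval fields gone, every timing site in tcp.c follows the same ktime pattern: take a monotonic timestamp with ktime_get(), then accumulate the interval with ktime_add(total, ktime_sub(now, start)), as o2net_update_send_stats() and o2net_update_recv_stats() above do. A minimal, self-contained illustration of that pattern; toy_timing is not an ocfs2 structure:

```c
#include <linux/ktime.h>

/* Toy version of the ktime accumulation used by o2net_update_send_stats(). */
struct toy_timing {
	ktime_t start;		/* timestamp taken when the operation begins */
	ktime_t total;		/* accumulated time across all operations */
	unsigned long count;	/* number of completed operations */
};

static inline void toy_timing_begin(struct toy_timing *t)
{
	t->start = ktime_get();	/* monotonic, no timeval rounding */
}

static inline void toy_timing_end(struct toy_timing *t)
{
	t->total = ktime_add(t->total, ktime_sub(ktime_get(), t->start));
	t->count++;
}
```
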
@@ -355,6 +406,7 @@ static void sc_kref_release(struct kref *kref) | |||
355 | sc->sc_sock = NULL; | 406 | sc->sc_sock = NULL; |
356 | } | 407 | } |
357 | 408 | ||
409 | o2nm_undepend_item(&sc->sc_node->nd_item); | ||
358 | o2nm_node_put(sc->sc_node); | 410 | o2nm_node_put(sc->sc_node); |
359 | sc->sc_node = NULL; | 411 | sc->sc_node = NULL; |
360 | 412 | ||
@@ -376,6 +428,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) | |||
376 | { | 428 | { |
377 | struct o2net_sock_container *sc, *ret = NULL; | 429 | struct o2net_sock_container *sc, *ret = NULL; |
378 | struct page *page = NULL; | 430 | struct page *page = NULL; |
431 | int status = 0; | ||
379 | 432 | ||
380 | page = alloc_page(GFP_NOFS); | 433 | page = alloc_page(GFP_NOFS); |
381 | sc = kzalloc(sizeof(*sc), GFP_NOFS); | 434 | sc = kzalloc(sizeof(*sc), GFP_NOFS); |
@@ -386,6 +439,13 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) | |||
386 | o2nm_node_get(node); | 439 | o2nm_node_get(node); |
387 | sc->sc_node = node; | 440 | sc->sc_node = node; |
388 | 441 | ||
442 | /* pin the node item of the remote node */ | ||
443 | status = o2nm_depend_item(&node->nd_item); | ||
444 | if (status) { | ||
445 | mlog_errno(status); | ||
446 | o2nm_node_put(node); | ||
447 | goto out; | ||
448 | } | ||
389 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); | 449 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); |
390 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); | 450 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); |
391 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); | 451 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); |
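[Editor's note] The sc_kref_release()/sc_alloc() hunks above pair o2nm_depend_item() with o2nm_undepend_item(), so the remote node's configfs item stays pinned for the lifetime of the socket container; if the pin fails, the node reference taken just before is dropped and the allocation bails out. Below is a minimal userspace sketch of that acquire/rollback/release discipline. depend()/undepend() and the struct names are illustrative stand-ins, not kernel APIs.

/* Sketch of the pin-on-alloc / unpin-on-last-put pattern above.
 * depend()/undepend() are illustrative stand-ins, not kernel calls. */
#include <stdio.h>
#include <stdlib.h>

struct node { int refs; int pins; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { n->refs--; }
static int  depend(struct node *n)   { n->pins++; return 0; /* or an error */ }
static void undepend(struct node *n) { n->pins--; }

struct container { struct node *node; };

static struct container *container_alloc(struct node *n)
{
    struct container *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;
    node_get(n);
    c->node = n;
    if (depend(n)) {            /* pin failed: roll back the ref and bail */
        node_put(n);
        free(c);
        return NULL;
    }
    return c;
}

static void container_release(struct container *c)
{
    undepend(c->node);          /* unpin before dropping the reference */
    node_put(c->node);
    free(c);
}

int main(void)
{
    struct node n = {0, 0};
    struct container *c = container_alloc(&n);
    if (c)
        container_release(c);
    printf("refs=%d pins=%d\n", n.refs, n.pins);   /* both back to 0 */
    return 0;
}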
@@ -546,7 +606,7 @@ static void o2net_data_ready(struct sock *sk, int bytes) | |||
546 | if (sk->sk_user_data) { | 606 | if (sk->sk_user_data) { |
547 | struct o2net_sock_container *sc = sk->sk_user_data; | 607 | struct o2net_sock_container *sc = sk->sk_user_data; |
548 | sclog(sc, "data_ready hit\n"); | 608 | sclog(sc, "data_ready hit\n"); |
549 | do_gettimeofday(&sc->sc_tv_data_ready); | 609 | o2net_set_data_ready_time(sc); |
550 | o2net_sc_queue_work(sc, &sc->sc_rx_work); | 610 | o2net_sc_queue_work(sc, &sc->sc_rx_work); |
551 | ready = sc->sc_data_ready; | 611 | ready = sc->sc_data_ready; |
552 | } else { | 612 | } else { |
@@ -1070,6 +1130,8 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, | |||
1070 | o2net_set_nst_status_time(&nst); | 1130 | o2net_set_nst_status_time(&nst); |
1071 | wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); | 1131 | wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); |
1072 | 1132 | ||
1133 | o2net_update_send_stats(&nst, sc); | ||
1134 | |||
1073 | /* Note that we avoid overwriting the callers status return | 1135 | /* Note that we avoid overwriting the callers status return |
1074 | * variable if a system error was reported on the other | 1136 | * variable if a system error was reported on the other |
1075 | * side. Callers beware. */ | 1137 | * side. Callers beware. */ |
@@ -1183,13 +1245,15 @@ static int o2net_process_message(struct o2net_sock_container *sc, | |||
1183 | if (syserr != O2NET_ERR_NONE) | 1245 | if (syserr != O2NET_ERR_NONE) |
1184 | goto out_respond; | 1246 | goto out_respond; |
1185 | 1247 | ||
1186 | do_gettimeofday(&sc->sc_tv_func_start); | 1248 | o2net_set_func_start_time(sc); |
1187 | sc->sc_msg_key = be32_to_cpu(hdr->key); | 1249 | sc->sc_msg_key = be32_to_cpu(hdr->key); |
1188 | sc->sc_msg_type = be16_to_cpu(hdr->msg_type); | 1250 | sc->sc_msg_type = be16_to_cpu(hdr->msg_type); |
1189 | handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + | 1251 | handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + |
1190 | be16_to_cpu(hdr->data_len), | 1252 | be16_to_cpu(hdr->data_len), |
1191 | nmh->nh_func_data, &ret_data); | 1253 | nmh->nh_func_data, &ret_data); |
1192 | do_gettimeofday(&sc->sc_tv_func_stop); | 1254 | o2net_set_func_stop_time(sc); |
1255 | |||
1256 | o2net_update_recv_stats(sc); | ||
1193 | 1257 | ||
1194 | out_respond: | 1258 | out_respond: |
1195 | /* this destroys the hdr, so don't use it after this */ | 1259 | /* this destroys the hdr, so don't use it after this */ |
@@ -1300,7 +1364,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc) | |||
1300 | size_t datalen; | 1364 | size_t datalen; |
1301 | 1365 | ||
1302 | sclog(sc, "receiving\n"); | 1366 | sclog(sc, "receiving\n"); |
1303 | do_gettimeofday(&sc->sc_tv_advance_start); | 1367 | o2net_set_advance_start_time(sc); |
1304 | 1368 | ||
1305 | if (unlikely(sc->sc_handshake_ok == 0)) { | 1369 | if (unlikely(sc->sc_handshake_ok == 0)) { |
1306 | if(sc->sc_page_off < sizeof(struct o2net_handshake)) { | 1370 | if(sc->sc_page_off < sizeof(struct o2net_handshake)) { |
@@ -1375,7 +1439,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc) | |||
1375 | 1439 | ||
1376 | out: | 1440 | out: |
1377 | sclog(sc, "ret = %d\n", ret); | 1441 | sclog(sc, "ret = %d\n", ret); |
1378 | do_gettimeofday(&sc->sc_tv_advance_stop); | 1442 | o2net_set_advance_stop_time(sc); |
1379 | return ret; | 1443 | return ret; |
1380 | } | 1444 | } |
1381 | 1445 | ||
@@ -1475,27 +1539,28 @@ static void o2net_idle_timer(unsigned long data) | |||
1475 | { | 1539 | { |
1476 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; | 1540 | struct o2net_sock_container *sc = (struct o2net_sock_container *)data; |
1477 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 1541 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
1478 | struct timeval now; | ||
1479 | 1542 | ||
1480 | do_gettimeofday(&now); | 1543 | #ifdef CONFIG_DEBUG_FS |
1544 | ktime_t now = ktime_get(); | ||
1545 | #endif | ||
1481 | 1546 | ||
1482 | printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " | 1547 | printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " |
1483 | "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), | 1548 | "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), |
1484 | o2net_idle_timeout() / 1000, | 1549 | o2net_idle_timeout() / 1000, |
1485 | o2net_idle_timeout() % 1000); | 1550 | o2net_idle_timeout() % 1000); |
1486 | mlog(ML_NOTICE, "here are some times that might help debug the " | 1551 | |
1487 | "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " | 1552 | #ifdef CONFIG_DEBUG_FS |
1488 | "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", | 1553 | mlog(ML_NOTICE, "Here are some times that might help debug the " |
1489 | sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, | 1554 | "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, " |
1490 | now.tv_sec, (long) now.tv_usec, | 1555 | "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n", |
1491 | sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, | 1556 | (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now), |
1492 | sc->sc_tv_advance_start.tv_sec, | 1557 | (long long)ktime_to_us(sc->sc_tv_data_ready), |
1493 | (long) sc->sc_tv_advance_start.tv_usec, | 1558 | (long long)ktime_to_us(sc->sc_tv_advance_start), |
1494 | sc->sc_tv_advance_stop.tv_sec, | 1559 | (long long)ktime_to_us(sc->sc_tv_advance_stop), |
1495 | (long) sc->sc_tv_advance_stop.tv_usec, | ||
1496 | sc->sc_msg_key, sc->sc_msg_type, | 1560 | sc->sc_msg_key, sc->sc_msg_type, |
1497 | sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec, | 1561 | (long long)ktime_to_us(sc->sc_tv_func_start), |
1498 | sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec); | 1562 | (long long)ktime_to_us(sc->sc_tv_func_stop)); |
1563 | #endif | ||
1499 | 1564 | ||
1500 | /* | 1565 | /* |
1501 | * Initialize the nn_timeout so that the next connection attempt | 1566 | * Initialize the nn_timeout so that the next connection attempt |
@@ -1511,7 +1576,7 @@ static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) | |||
1511 | o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); | 1576 | o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); |
1512 | o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, | 1577 | o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, |
1513 | msecs_to_jiffies(o2net_keepalive_delay())); | 1578 | msecs_to_jiffies(o2net_keepalive_delay())); |
1514 | do_gettimeofday(&sc->sc_tv_timer); | 1579 | o2net_set_sock_timer(sc); |
1515 | mod_timer(&sc->sc_idle_timeout, | 1580 | mod_timer(&sc->sc_idle_timeout, |
1516 | jiffies + msecs_to_jiffies(o2net_idle_timeout())); | 1581 | jiffies + msecs_to_jiffies(o2net_idle_timeout())); |
1517 | } | 1582 | } |
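[Editor's note] The tcp.c hunks above replace the do_gettimeofday() timestamps with ktime and, under CONFIG_OCFS2_FS_STATS, accumulate three per-socket durations for every send: acquiry (socket lookup to send), send (send to status wait), and status (status wait to completion), plus a send count. The following is a small userspace analogue of that accumulation, using clock_gettime(CLOCK_MONOTONIC) in place of ktime_get(); the struct and function names are chosen here for illustration and are not part of the kernel code.

/* Userspace sketch of the per-socket send-timing accumulation above. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct send_stats {
    int64_t  acquiry_total_ns;   /* sock time   -> send time   */
    int64_t  send_total_ns;      /* send time   -> status time */
    int64_t  status_total_ns;    /* status time -> completion  */
    uint32_t send_count;
};

static int64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void update_send_stats(struct send_stats *st, int64_t sock_t,
                              int64_t send_t, int64_t status_t)
{
    st->status_total_ns  += now_ns() - status_t;
    st->send_total_ns    += status_t - send_t;
    st->acquiry_total_ns += send_t - sock_t;
    st->send_count++;
}

int main(void)
{
    struct send_stats st = {0};
    int64_t sock_t = now_ns();    /* message queued, socket looked up */
    usleep(1000);
    int64_t send_t = now_ns();    /* payload handed to the socket */
    usleep(2000);
    int64_t status_t = now_ns();  /* waiting on the remote status */
    usleep(3000);
    update_send_stats(&st, sock_t, send_t, status_t);
    printf("sends=%u acquiry=%lldns send=%lldns status=%lldns\n",
           st.send_count, (long long)st.acquiry_total_ns,
           (long long)st.send_total_ns, (long long)st.status_total_ns);
    return 0;
}

Dividing each running total by the corresponding count gives the average per-message time; presumably that is how the per-socket timing statistics are reported.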
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 15fdbdf9eb4b..4cbcb65784a3 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h | |||
@@ -166,18 +166,27 @@ struct o2net_sock_container { | |||
166 | /* original handlers for the sockets */ | 166 | /* original handlers for the sockets */ |
167 | void (*sc_state_change)(struct sock *sk); | 167 | void (*sc_state_change)(struct sock *sk); |
168 | void (*sc_data_ready)(struct sock *sk, int bytes); | 168 | void (*sc_data_ready)(struct sock *sk, int bytes); |
169 | #ifdef CONFIG_DEBUG_FS | 169 | |
170 | struct list_head sc_net_debug_item; | ||
171 | #endif | ||
172 | struct timeval sc_tv_timer; | ||
173 | struct timeval sc_tv_data_ready; | ||
174 | struct timeval sc_tv_advance_start; | ||
175 | struct timeval sc_tv_advance_stop; | ||
176 | struct timeval sc_tv_func_start; | ||
177 | struct timeval sc_tv_func_stop; | ||
178 | u32 sc_msg_key; | 170 | u32 sc_msg_key; |
179 | u16 sc_msg_type; | 171 | u16 sc_msg_type; |
180 | 172 | ||
173 | #ifdef CONFIG_DEBUG_FS | ||
174 | struct list_head sc_net_debug_item; | ||
175 | ktime_t sc_tv_timer; | ||
176 | ktime_t sc_tv_data_ready; | ||
177 | ktime_t sc_tv_advance_start; | ||
178 | ktime_t sc_tv_advance_stop; | ||
179 | ktime_t sc_tv_func_start; | ||
180 | ktime_t sc_tv_func_stop; | ||
181 | #endif | ||
182 | #ifdef CONFIG_OCFS2_FS_STATS | ||
183 | ktime_t sc_tv_acquiry_total; | ||
184 | ktime_t sc_tv_send_total; | ||
185 | ktime_t sc_tv_status_total; | ||
186 | u32 sc_send_count; | ||
187 | u32 sc_recv_count; | ||
188 | ktime_t sc_tv_process_total; | ||
189 | #endif | ||
181 | struct mutex sc_send_lock; | 190 | struct mutex sc_send_lock; |
182 | }; | 191 | }; |
183 | 192 | ||
@@ -220,9 +229,9 @@ struct o2net_send_tracking { | |||
220 | u32 st_msg_type; | 229 | u32 st_msg_type; |
221 | u32 st_msg_key; | 230 | u32 st_msg_key; |
222 | u8 st_node; | 231 | u8 st_node; |
223 | struct timeval st_sock_time; | 232 | ktime_t st_sock_time; |
224 | struct timeval st_send_time; | 233 | ktime_t st_send_time; |
225 | struct timeval st_status_time; | 234 | ktime_t st_status_time; |
226 | }; | 235 | }; |
227 | #else | 236 | #else |
228 | struct o2net_send_tracking { | 237 | struct o2net_send_tracking { |
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index f44999156839..3a3ed4bb794b 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c | |||
@@ -90,19 +90,29 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
90 | 90 | ||
91 | void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | 91 | void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
92 | { | 92 | { |
93 | mlog_entry_void(); | 93 | struct dlm_lock_resource *res; |
94 | 94 | ||
95 | BUG_ON(!dlm); | 95 | BUG_ON(!dlm); |
96 | BUG_ON(!lock); | 96 | BUG_ON(!lock); |
97 | 97 | ||
98 | res = lock->lockres; | ||
99 | |||
98 | assert_spin_locked(&dlm->ast_lock); | 100 | assert_spin_locked(&dlm->ast_lock); |
101 | |||
99 | if (!list_empty(&lock->ast_list)) { | 102 | if (!list_empty(&lock->ast_list)) { |
100 | mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", | 103 | mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " |
104 | "AST list not empty, pending %d, newlevel %d\n", | ||
105 | dlm->name, res->lockname.len, res->lockname.name, | ||
106 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
107 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | ||
101 | lock->ast_pending, lock->ml.type); | 108 | lock->ast_pending, lock->ml.type); |
102 | BUG(); | 109 | BUG(); |
103 | } | 110 | } |
104 | if (lock->ast_pending) | 111 | if (lock->ast_pending) |
105 | mlog(0, "lock has an ast getting flushed right now\n"); | 112 | mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", |
113 | dlm->name, res->lockname.len, res->lockname.name, | ||
114 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
115 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); | ||
106 | 116 | ||
107 | /* putting lock on list, add a ref */ | 117 | /* putting lock on list, add a ref */ |
108 | dlm_lock_get(lock); | 118 | dlm_lock_get(lock); |
@@ -110,9 +120,10 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
110 | 120 | ||
111 | /* check to see if this ast obsoletes the bast */ | 121 | /* check to see if this ast obsoletes the bast */ |
112 | if (dlm_should_cancel_bast(dlm, lock)) { | 122 | if (dlm_should_cancel_bast(dlm, lock)) { |
113 | struct dlm_lock_resource *res = lock->lockres; | 123 | mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", |
114 | mlog(0, "%s: cancelling bast for %.*s\n", | 124 | dlm->name, res->lockname.len, res->lockname.name, |
115 | dlm->name, res->lockname.len, res->lockname.name); | 125 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), |
126 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); | ||
116 | lock->bast_pending = 0; | 127 | lock->bast_pending = 0; |
117 | list_del_init(&lock->bast_list); | 128 | list_del_init(&lock->bast_list); |
118 | lock->ml.highest_blocked = LKM_IVMODE; | 129 | lock->ml.highest_blocked = LKM_IVMODE; |
@@ -134,8 +145,6 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
134 | 145 | ||
135 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | 146 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
136 | { | 147 | { |
137 | mlog_entry_void(); | ||
138 | |||
139 | BUG_ON(!dlm); | 148 | BUG_ON(!dlm); |
140 | BUG_ON(!lock); | 149 | BUG_ON(!lock); |
141 | 150 | ||
@@ -147,15 +156,21 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
147 | 156 | ||
148 | void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | 157 | void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
149 | { | 158 | { |
150 | mlog_entry_void(); | 159 | struct dlm_lock_resource *res; |
151 | 160 | ||
152 | BUG_ON(!dlm); | 161 | BUG_ON(!dlm); |
153 | BUG_ON(!lock); | 162 | BUG_ON(!lock); |
163 | |||
154 | assert_spin_locked(&dlm->ast_lock); | 164 | assert_spin_locked(&dlm->ast_lock); |
155 | 165 | ||
166 | res = lock->lockres; | ||
167 | |||
156 | BUG_ON(!list_empty(&lock->bast_list)); | 168 | BUG_ON(!list_empty(&lock->bast_list)); |
157 | if (lock->bast_pending) | 169 | if (lock->bast_pending) |
158 | mlog(0, "lock has a bast getting flushed right now\n"); | 170 | mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", |
171 | dlm->name, res->lockname.len, res->lockname.name, | ||
172 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
173 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); | ||
159 | 174 | ||
160 | /* putting lock on list, add a ref */ | 175 | /* putting lock on list, add a ref */ |
161 | dlm_lock_get(lock); | 176 | dlm_lock_get(lock); |
@@ -167,8 +182,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | |||
167 | 182 | ||
168 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | 183 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
169 | { | 184 | { |
170 | mlog_entry_void(); | ||
171 | |||
172 | BUG_ON(!dlm); | 185 | BUG_ON(!dlm); |
173 | BUG_ON(!lock); | 186 | BUG_ON(!lock); |
174 | 187 | ||
@@ -213,7 +226,10 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
213 | dlm_astlockfunc_t *fn; | 226 | dlm_astlockfunc_t *fn; |
214 | struct dlm_lockstatus *lksb; | 227 | struct dlm_lockstatus *lksb; |
215 | 228 | ||
216 | mlog_entry_void(); | 229 | mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, |
230 | res->lockname.len, res->lockname.name, | ||
231 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
232 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); | ||
217 | 233 | ||
218 | lksb = lock->lksb; | 234 | lksb = lock->lksb; |
219 | fn = lock->ast; | 235 | fn = lock->ast; |
@@ -231,7 +247,10 @@ int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
231 | struct dlm_lockstatus *lksb; | 247 | struct dlm_lockstatus *lksb; |
232 | int lksbflags; | 248 | int lksbflags; |
233 | 249 | ||
234 | mlog_entry_void(); | 250 | mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, |
251 | res->lockname.len, res->lockname.name, | ||
252 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
253 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); | ||
235 | 254 | ||
236 | lksb = lock->lksb; | 255 | lksb = lock->lksb; |
237 | BUG_ON(lock->ml.node == dlm->node_num); | 256 | BUG_ON(lock->ml.node == dlm->node_num); |
@@ -250,9 +269,14 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
250 | { | 269 | { |
251 | dlm_bastlockfunc_t *fn = lock->bast; | 270 | dlm_bastlockfunc_t *fn = lock->bast; |
252 | 271 | ||
253 | mlog_entry_void(); | ||
254 | BUG_ON(lock->ml.node != dlm->node_num); | 272 | BUG_ON(lock->ml.node != dlm->node_num); |
255 | 273 | ||
274 | mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n", | ||
275 | dlm->name, res->lockname.len, res->lockname.name, | ||
276 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
277 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | ||
278 | blocked_type); | ||
279 | |||
256 | (*fn)(lock->astdata, blocked_type); | 280 | (*fn)(lock->astdata, blocked_type); |
257 | } | 281 | } |
258 | 282 | ||
@@ -332,7 +356,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, | |||
332 | /* cannot get a proxy ast message if this node owns it */ | 356 | /* cannot get a proxy ast message if this node owns it */ |
333 | BUG_ON(res->owner == dlm->node_num); | 357 | BUG_ON(res->owner == dlm->node_num); |
334 | 358 | ||
335 | mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); | 359 | mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, |
360 | res->lockname.name); | ||
336 | 361 | ||
337 | spin_lock(&res->spinlock); | 362 | spin_lock(&res->spinlock); |
338 | if (res->state & DLM_LOCK_RES_RECOVERING) { | 363 | if (res->state & DLM_LOCK_RES_RECOVERING) { |
@@ -382,8 +407,12 @@ do_ast: | |||
382 | if (past->type == DLM_AST) { | 407 | if (past->type == DLM_AST) { |
383 | /* do not alter lock refcount. switching lists. */ | 408 | /* do not alter lock refcount. switching lists. */ |
384 | list_move_tail(&lock->list, &res->granted); | 409 | list_move_tail(&lock->list, &res->granted); |
385 | mlog(0, "ast: Adding to granted list... type=%d, " | 410 | mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n", |
386 | "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); | 411 | dlm->name, res->lockname.len, res->lockname.name, |
412 | dlm_get_lock_cookie_node(be64_to_cpu(cookie)), | ||
413 | dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), | ||
414 | lock->ml.type, lock->ml.convert_type); | ||
415 | |||
387 | if (lock->ml.convert_type != LKM_IVMODE) { | 416 | if (lock->ml.convert_type != LKM_IVMODE) { |
388 | lock->ml.type = lock->ml.convert_type; | 417 | lock->ml.type = lock->ml.convert_type; |
389 | lock->ml.convert_type = LKM_IVMODE; | 418 | lock->ml.convert_type = LKM_IVMODE; |
@@ -426,9 +455,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
426 | size_t veclen = 1; | 455 | size_t veclen = 1; |
427 | int status; | 456 | int status; |
428 | 457 | ||
429 | mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", | 458 | mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name, |
430 | res->lockname.len, res->lockname.name, lock->ml.node, | 459 | res->lockname.len, res->lockname.name, lock->ml.node, msg_type, |
431 | msg_type, blocked_type); | 460 | blocked_type); |
432 | 461 | ||
433 | memset(&past, 0, sizeof(struct dlm_proxy_ast)); | 462 | memset(&past, 0, sizeof(struct dlm_proxy_ast)); |
434 | past.node_idx = dlm->node_num; | 463 | past.node_idx = dlm->node_num; |
@@ -441,7 +470,6 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
441 | vec[0].iov_len = sizeof(struct dlm_proxy_ast); | 470 | vec[0].iov_len = sizeof(struct dlm_proxy_ast); |
442 | vec[0].iov_base = &past; | 471 | vec[0].iov_base = &past; |
443 | if (flags & DLM_LKSB_GET_LVB) { | 472 | if (flags & DLM_LKSB_GET_LVB) { |
444 | mlog(0, "returning requested LVB data\n"); | ||
445 | be32_add_cpu(&past.flags, LKM_GET_LVB); | 473 | be32_add_cpu(&past.flags, LKM_GET_LVB); |
446 | vec[1].iov_len = DLM_LVB_LEN; | 474 | vec[1].iov_len = DLM_LVB_LEN; |
447 | vec[1].iov_base = lock->lksb->lvb; | 475 | vec[1].iov_base = lock->lksb->lvb; |
@@ -451,8 +479,8 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | |||
451 | ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, | 479 | ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, |
452 | lock->ml.node, &status); | 480 | lock->ml.node, &status); |
453 | if (ret < 0) | 481 | if (ret < 0) |
454 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 482 | mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n", |
455 | "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key, | 483 | dlm->name, res->lockname.len, res->lockname.name, ret, |
456 | lock->ml.node); | 484 | lock->ml.node); |
457 | else { | 485 | else { |
458 | if (status == DLM_RECOVERING) { | 486 | if (status == DLM_RECOVERING) { |
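[Editor's note] Most of the new mlog() lines above identify a lock as "%u:%llu" by applying dlm_get_lock_cookie_node() and dlm_get_lock_cookie_seq() to the CPU-order cookie. Assuming the cookie packs the owning node number in the top byte and a 56-bit sequence in the remaining bits, which is what those helper names suggest, a standalone sketch of the unpacking looks like this; the helper names below are illustrative.

/* Sketch: unpack a 64-bit DLM lock cookie into (node, seq), assuming
 * node in the top 8 bits and a 56-bit sequence below it. Illustrative. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t cookie_node(uint64_t cookie)
{
    return (uint8_t)(cookie >> 56);
}

static uint64_t cookie_seq(uint64_t cookie)
{
    return cookie & 0x00ffffffffffffffULL;
}

int main(void)
{
    uint64_t cookie = ((uint64_t)7 << 56) | 42;   /* node 7, seq 42 */
    printf("lock %u:%" PRIu64 "\n", cookie_node(cookie), cookie_seq(cookie));
    return 0;
}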
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index b36d0bf77a5a..4bdf7baee344 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -50,10 +50,10 @@ | |||
50 | #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l) | 50 | #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l) |
51 | 51 | ||
52 | enum dlm_mle_type { | 52 | enum dlm_mle_type { |
53 | DLM_MLE_BLOCK, | 53 | DLM_MLE_BLOCK = 0, |
54 | DLM_MLE_MASTER, | 54 | DLM_MLE_MASTER = 1, |
55 | DLM_MLE_MIGRATION, | 55 | DLM_MLE_MIGRATION = 2, |
56 | DLM_MLE_NUM_TYPES | 56 | DLM_MLE_NUM_TYPES = 3, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct dlm_master_list_entry { | 59 | struct dlm_master_list_entry { |
@@ -82,8 +82,8 @@ struct dlm_master_list_entry { | |||
82 | 82 | ||
83 | enum dlm_ast_type { | 83 | enum dlm_ast_type { |
84 | DLM_AST = 0, | 84 | DLM_AST = 0, |
85 | DLM_BAST, | 85 | DLM_BAST = 1, |
86 | DLM_ASTUNLOCK | 86 | DLM_ASTUNLOCK = 2, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | 89 | ||
@@ -119,9 +119,9 @@ struct dlm_recovery_ctxt | |||
119 | 119 | ||
120 | enum dlm_ctxt_state { | 120 | enum dlm_ctxt_state { |
121 | DLM_CTXT_NEW = 0, | 121 | DLM_CTXT_NEW = 0, |
122 | DLM_CTXT_JOINED, | 122 | DLM_CTXT_JOINED = 1, |
123 | DLM_CTXT_IN_SHUTDOWN, | 123 | DLM_CTXT_IN_SHUTDOWN = 2, |
124 | DLM_CTXT_LEAVING, | 124 | DLM_CTXT_LEAVING = 3, |
125 | }; | 125 | }; |
126 | 126 | ||
127 | struct dlm_ctxt | 127 | struct dlm_ctxt |
@@ -388,8 +388,8 @@ struct dlm_lock | |||
388 | 388 | ||
389 | enum dlm_lockres_list { | 389 | enum dlm_lockres_list { |
390 | DLM_GRANTED_LIST = 0, | 390 | DLM_GRANTED_LIST = 0, |
391 | DLM_CONVERTING_LIST, | 391 | DLM_CONVERTING_LIST = 1, |
392 | DLM_BLOCKED_LIST | 392 | DLM_BLOCKED_LIST = 2, |
393 | }; | 393 | }; |
394 | 394 | ||
395 | static inline int dlm_lvb_is_empty(char *lvb) | 395 | static inline int dlm_lvb_is_empty(char *lvb) |
@@ -427,27 +427,27 @@ struct dlm_node_iter | |||
427 | 427 | ||
428 | 428 | ||
429 | enum { | 429 | enum { |
430 | DLM_MASTER_REQUEST_MSG = 500, | 430 | DLM_MASTER_REQUEST_MSG = 500, |
431 | DLM_UNUSED_MSG1, /* 501 */ | 431 | DLM_UNUSED_MSG1 = 501, |
432 | DLM_ASSERT_MASTER_MSG, /* 502 */ | 432 | DLM_ASSERT_MASTER_MSG = 502, |
433 | DLM_CREATE_LOCK_MSG, /* 503 */ | 433 | DLM_CREATE_LOCK_MSG = 503, |
434 | DLM_CONVERT_LOCK_MSG, /* 504 */ | 434 | DLM_CONVERT_LOCK_MSG = 504, |
435 | DLM_PROXY_AST_MSG, /* 505 */ | 435 | DLM_PROXY_AST_MSG = 505, |
436 | DLM_UNLOCK_LOCK_MSG, /* 506 */ | 436 | DLM_UNLOCK_LOCK_MSG = 506, |
437 | DLM_DEREF_LOCKRES_MSG, /* 507 */ | 437 | DLM_DEREF_LOCKRES_MSG = 507, |
438 | DLM_MIGRATE_REQUEST_MSG, /* 508 */ | 438 | DLM_MIGRATE_REQUEST_MSG = 508, |
439 | DLM_MIG_LOCKRES_MSG, /* 509 */ | 439 | DLM_MIG_LOCKRES_MSG = 509, |
440 | DLM_QUERY_JOIN_MSG, /* 510 */ | 440 | DLM_QUERY_JOIN_MSG = 510, |
441 | DLM_ASSERT_JOINED_MSG, /* 511 */ | 441 | DLM_ASSERT_JOINED_MSG = 511, |
442 | DLM_CANCEL_JOIN_MSG, /* 512 */ | 442 | DLM_CANCEL_JOIN_MSG = 512, |
443 | DLM_EXIT_DOMAIN_MSG, /* 513 */ | 443 | DLM_EXIT_DOMAIN_MSG = 513, |
444 | DLM_MASTER_REQUERY_MSG, /* 514 */ | 444 | DLM_MASTER_REQUERY_MSG = 514, |
445 | DLM_LOCK_REQUEST_MSG, /* 515 */ | 445 | DLM_LOCK_REQUEST_MSG = 515, |
446 | DLM_RECO_DATA_DONE_MSG, /* 516 */ | 446 | DLM_RECO_DATA_DONE_MSG = 516, |
447 | DLM_BEGIN_RECO_MSG, /* 517 */ | 447 | DLM_BEGIN_RECO_MSG = 517, |
448 | DLM_FINALIZE_RECO_MSG, /* 518 */ | 448 | DLM_FINALIZE_RECO_MSG = 518, |
449 | DLM_QUERY_REGION, /* 519 */ | 449 | DLM_QUERY_REGION = 519, |
450 | DLM_QUERY_NODEINFO, /* 520 */ | 450 | DLM_QUERY_NODEINFO = 520, |
451 | }; | 451 | }; |
452 | 452 | ||
453 | struct dlm_reco_node_data | 453 | struct dlm_reco_node_data |
@@ -460,19 +460,19 @@ struct dlm_reco_node_data | |||
460 | enum { | 460 | enum { |
461 | DLM_RECO_NODE_DATA_DEAD = -1, | 461 | DLM_RECO_NODE_DATA_DEAD = -1, |
462 | DLM_RECO_NODE_DATA_INIT = 0, | 462 | DLM_RECO_NODE_DATA_INIT = 0, |
463 | DLM_RECO_NODE_DATA_REQUESTING, | 463 | DLM_RECO_NODE_DATA_REQUESTING = 1, |
464 | DLM_RECO_NODE_DATA_REQUESTED, | 464 | DLM_RECO_NODE_DATA_REQUESTED = 2, |
465 | DLM_RECO_NODE_DATA_RECEIVING, | 465 | DLM_RECO_NODE_DATA_RECEIVING = 3, |
466 | DLM_RECO_NODE_DATA_DONE, | 466 | DLM_RECO_NODE_DATA_DONE = 4, |
467 | DLM_RECO_NODE_DATA_FINALIZE_SENT, | 467 | DLM_RECO_NODE_DATA_FINALIZE_SENT = 5, |
468 | }; | 468 | }; |
469 | 469 | ||
470 | 470 | ||
471 | enum { | 471 | enum { |
472 | DLM_MASTER_RESP_NO = 0, | 472 | DLM_MASTER_RESP_NO = 0, |
473 | DLM_MASTER_RESP_YES, | 473 | DLM_MASTER_RESP_YES = 1, |
474 | DLM_MASTER_RESP_MAYBE, | 474 | DLM_MASTER_RESP_MAYBE = 2, |
475 | DLM_MASTER_RESP_ERROR | 475 | DLM_MASTER_RESP_ERROR = 3, |
476 | }; | 476 | }; |
477 | 477 | ||
478 | 478 | ||
@@ -649,9 +649,9 @@ struct dlm_proxy_ast | |||
649 | #define DLM_MOD_KEY (0x666c6172) | 649 | #define DLM_MOD_KEY (0x666c6172) |
650 | enum dlm_query_join_response_code { | 650 | enum dlm_query_join_response_code { |
651 | JOIN_DISALLOW = 0, | 651 | JOIN_DISALLOW = 0, |
652 | JOIN_OK, | 652 | JOIN_OK = 1, |
653 | JOIN_OK_NO_MAP, | 653 | JOIN_OK_NO_MAP = 2, |
654 | JOIN_PROTOCOL_MISMATCH, | 654 | JOIN_PROTOCOL_MISMATCH = 3, |
655 | }; | 655 | }; |
656 | 656 | ||
657 | struct dlm_query_join_packet { | 657 | struct dlm_query_join_packet { |
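[Editor's note] The dlmcommon.h hunks spell out the numeric value of every enumerator instead of relying on implicit numbering. For values that cross the wire or index fixed-size counters (the DLM_*_MSG message types, the MLE type counts), this keeps a later insertion from silently renumbering everything. A compile-time guard in the same spirit, written as standalone C11 purely for illustration (only a few of the real values are reproduced):

/* Standalone illustration: pin wire-visible enum values with a
 * compile-time assertion so accidental renumbering fails the build. */
#include <stdio.h>

enum dlm_msg {
    DLM_MASTER_REQUEST_MSG = 500,
    DLM_UNUSED_MSG1        = 501,
    DLM_ASSERT_MASTER_MSG  = 502,
    DLM_PROXY_AST_MSG      = 505,
};

_Static_assert(DLM_PROXY_AST_MSG == 505,
               "DLM_PROXY_AST_MSG must stay 505 for wire compatibility");

int main(void)
{
    printf("proxy ast msg type = %d\n", DLM_PROXY_AST_MSG);
    return 0;
}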
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 272ec8631a51..04a32be0aeb9 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -370,92 +370,46 @@ static void dlm_debug_get(struct dlm_debug_ctxt *dc) | |||
370 | kref_get(&dc->debug_refcnt); | 370 | kref_get(&dc->debug_refcnt); |
371 | } | 371 | } |
372 | 372 | ||
373 | static struct debug_buffer *debug_buffer_allocate(void) | 373 | static int debug_release(struct inode *inode, struct file *file) |
374 | { | 374 | { |
375 | struct debug_buffer *db = NULL; | 375 | free_page((unsigned long)file->private_data); |
376 | 376 | return 0; | |
377 | db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); | ||
378 | if (!db) | ||
379 | goto bail; | ||
380 | |||
381 | db->len = PAGE_SIZE; | ||
382 | db->buf = kmalloc(db->len, GFP_KERNEL); | ||
383 | if (!db->buf) | ||
384 | goto bail; | ||
385 | |||
386 | return db; | ||
387 | bail: | ||
388 | kfree(db); | ||
389 | return NULL; | ||
390 | } | ||
391 | |||
392 | static ssize_t debug_buffer_read(struct file *file, char __user *buf, | ||
393 | size_t nbytes, loff_t *ppos) | ||
394 | { | ||
395 | struct debug_buffer *db = file->private_data; | ||
396 | |||
397 | return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len); | ||
398 | } | ||
399 | |||
400 | static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence) | ||
401 | { | ||
402 | struct debug_buffer *db = file->private_data; | ||
403 | loff_t new = -1; | ||
404 | |||
405 | switch (whence) { | ||
406 | case 0: | ||
407 | new = off; | ||
408 | break; | ||
409 | case 1: | ||
410 | new = file->f_pos + off; | ||
411 | break; | ||
412 | } | ||
413 | |||
414 | if (new < 0 || new > db->len) | ||
415 | return -EINVAL; | ||
416 | |||
417 | return (file->f_pos = new); | ||
418 | } | 377 | } |
419 | 378 | ||
420 | static int debug_buffer_release(struct inode *inode, struct file *file) | 379 | static ssize_t debug_read(struct file *file, char __user *buf, |
380 | size_t nbytes, loff_t *ppos) | ||
421 | { | 381 | { |
422 | struct debug_buffer *db = file->private_data; | 382 | return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, |
423 | 383 | i_size_read(file->f_mapping->host)); | |
424 | if (db) | ||
425 | kfree(db->buf); | ||
426 | kfree(db); | ||
427 | |||
428 | return 0; | ||
429 | } | 384 | } |
430 | /* end - util funcs */ | 385 | /* end - util funcs */ |
431 | 386 | ||
432 | /* begin - purge list funcs */ | 387 | /* begin - purge list funcs */ |
433 | static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | 388 | static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len) |
434 | { | 389 | { |
435 | struct dlm_lock_resource *res; | 390 | struct dlm_lock_resource *res; |
436 | int out = 0; | 391 | int out = 0; |
437 | unsigned long total = 0; | 392 | unsigned long total = 0; |
438 | 393 | ||
439 | out += snprintf(db->buf + out, db->len - out, | 394 | out += snprintf(buf + out, len - out, |
440 | "Dumping Purgelist for Domain: %s\n", dlm->name); | 395 | "Dumping Purgelist for Domain: %s\n", dlm->name); |
441 | 396 | ||
442 | spin_lock(&dlm->spinlock); | 397 | spin_lock(&dlm->spinlock); |
443 | list_for_each_entry(res, &dlm->purge_list, purge) { | 398 | list_for_each_entry(res, &dlm->purge_list, purge) { |
444 | ++total; | 399 | ++total; |
445 | if (db->len - out < 100) | 400 | if (len - out < 100) |
446 | continue; | 401 | continue; |
447 | spin_lock(&res->spinlock); | 402 | spin_lock(&res->spinlock); |
448 | out += stringify_lockname(res->lockname.name, | 403 | out += stringify_lockname(res->lockname.name, |
449 | res->lockname.len, | 404 | res->lockname.len, |
450 | db->buf + out, db->len - out); | 405 | buf + out, len - out); |
451 | out += snprintf(db->buf + out, db->len - out, "\t%ld\n", | 406 | out += snprintf(buf + out, len - out, "\t%ld\n", |
452 | (jiffies - res->last_used)/HZ); | 407 | (jiffies - res->last_used)/HZ); |
453 | spin_unlock(&res->spinlock); | 408 | spin_unlock(&res->spinlock); |
454 | } | 409 | } |
455 | spin_unlock(&dlm->spinlock); | 410 | spin_unlock(&dlm->spinlock); |
456 | 411 | ||
457 | out += snprintf(db->buf + out, db->len - out, | 412 | out += snprintf(buf + out, len - out, "Total on list: %ld\n", total); |
458 | "Total on list: %ld\n", total); | ||
459 | 413 | ||
460 | return out; | 414 | return out; |
461 | } | 415 | } |
@@ -463,15 +417,15 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
463 | static int debug_purgelist_open(struct inode *inode, struct file *file) | 417 | static int debug_purgelist_open(struct inode *inode, struct file *file) |
464 | { | 418 | { |
465 | struct dlm_ctxt *dlm = inode->i_private; | 419 | struct dlm_ctxt *dlm = inode->i_private; |
466 | struct debug_buffer *db; | 420 | char *buf = NULL; |
467 | 421 | ||
468 | db = debug_buffer_allocate(); | 422 | buf = (char *) get_zeroed_page(GFP_NOFS); |
469 | if (!db) | 423 | if (!buf) |
470 | goto bail; | 424 | goto bail; |
471 | 425 | ||
472 | db->len = debug_purgelist_print(dlm, db); | 426 | i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1)); |
473 | 427 | ||
474 | file->private_data = db; | 428 | file->private_data = buf; |
475 | 429 | ||
476 | return 0; | 430 | return 0; |
477 | bail: | 431 | bail: |
@@ -480,14 +434,14 @@ bail: | |||
480 | 434 | ||
481 | static const struct file_operations debug_purgelist_fops = { | 435 | static const struct file_operations debug_purgelist_fops = { |
482 | .open = debug_purgelist_open, | 436 | .open = debug_purgelist_open, |
483 | .release = debug_buffer_release, | 437 | .release = debug_release, |
484 | .read = debug_buffer_read, | 438 | .read = debug_read, |
485 | .llseek = debug_buffer_llseek, | 439 | .llseek = generic_file_llseek, |
486 | }; | 440 | }; |
487 | /* end - purge list funcs */ | 441 | /* end - purge list funcs */ |
488 | 442 | ||
489 | /* begin - debug mle funcs */ | 443 | /* begin - debug mle funcs */ |
490 | static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | 444 | static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len) |
491 | { | 445 | { |
492 | struct dlm_master_list_entry *mle; | 446 | struct dlm_master_list_entry *mle; |
493 | struct hlist_head *bucket; | 447 | struct hlist_head *bucket; |
@@ -495,7 +449,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
495 | int i, out = 0; | 449 | int i, out = 0; |
496 | unsigned long total = 0, longest = 0, bucket_count = 0; | 450 | unsigned long total = 0, longest = 0, bucket_count = 0; |
497 | 451 | ||
498 | out += snprintf(db->buf + out, db->len - out, | 452 | out += snprintf(buf + out, len - out, |
499 | "Dumping MLEs for Domain: %s\n", dlm->name); | 453 | "Dumping MLEs for Domain: %s\n", dlm->name); |
500 | 454 | ||
501 | spin_lock(&dlm->master_lock); | 455 | spin_lock(&dlm->master_lock); |
@@ -506,16 +460,16 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
506 | master_hash_node); | 460 | master_hash_node); |
507 | ++total; | 461 | ++total; |
508 | ++bucket_count; | 462 | ++bucket_count; |
509 | if (db->len - out < 200) | 463 | if (len - out < 200) |
510 | continue; | 464 | continue; |
511 | out += dump_mle(mle, db->buf + out, db->len - out); | 465 | out += dump_mle(mle, buf + out, len - out); |
512 | } | 466 | } |
513 | longest = max(longest, bucket_count); | 467 | longest = max(longest, bucket_count); |
514 | bucket_count = 0; | 468 | bucket_count = 0; |
515 | } | 469 | } |
516 | spin_unlock(&dlm->master_lock); | 470 | spin_unlock(&dlm->master_lock); |
517 | 471 | ||
518 | out += snprintf(db->buf + out, db->len - out, | 472 | out += snprintf(buf + out, len - out, |
519 | "Total: %ld, Longest: %ld\n", total, longest); | 473 | "Total: %ld, Longest: %ld\n", total, longest); |
520 | return out; | 474 | return out; |
521 | } | 475 | } |
@@ -523,15 +477,15 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
523 | static int debug_mle_open(struct inode *inode, struct file *file) | 477 | static int debug_mle_open(struct inode *inode, struct file *file) |
524 | { | 478 | { |
525 | struct dlm_ctxt *dlm = inode->i_private; | 479 | struct dlm_ctxt *dlm = inode->i_private; |
526 | struct debug_buffer *db; | 480 | char *buf = NULL; |
527 | 481 | ||
528 | db = debug_buffer_allocate(); | 482 | buf = (char *) get_zeroed_page(GFP_NOFS); |
529 | if (!db) | 483 | if (!buf) |
530 | goto bail; | 484 | goto bail; |
531 | 485 | ||
532 | db->len = debug_mle_print(dlm, db); | 486 | i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1)); |
533 | 487 | ||
534 | file->private_data = db; | 488 | file->private_data = buf; |
535 | 489 | ||
536 | return 0; | 490 | return 0; |
537 | bail: | 491 | bail: |
@@ -540,9 +494,9 @@ bail: | |||
540 | 494 | ||
541 | static const struct file_operations debug_mle_fops = { | 495 | static const struct file_operations debug_mle_fops = { |
542 | .open = debug_mle_open, | 496 | .open = debug_mle_open, |
543 | .release = debug_buffer_release, | 497 | .release = debug_release, |
544 | .read = debug_buffer_read, | 498 | .read = debug_read, |
545 | .llseek = debug_buffer_llseek, | 499 | .llseek = generic_file_llseek, |
546 | }; | 500 | }; |
547 | 501 | ||
548 | /* end - debug mle funcs */ | 502 | /* end - debug mle funcs */ |
@@ -757,7 +711,7 @@ static const struct file_operations debug_lockres_fops = { | |||
757 | /* end - debug lockres funcs */ | 711 | /* end - debug lockres funcs */ |
758 | 712 | ||
759 | /* begin - debug state funcs */ | 713 | /* begin - debug state funcs */ |
760 | static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | 714 | static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) |
761 | { | 715 | { |
762 | int out = 0; | 716 | int out = 0; |
763 | struct dlm_reco_node_data *node; | 717 | struct dlm_reco_node_data *node; |
@@ -781,35 +735,35 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
781 | } | 735 | } |
782 | 736 | ||
783 | /* Domain: xxxxxxxxxx Key: 0xdfbac769 */ | 737 | /* Domain: xxxxxxxxxx Key: 0xdfbac769 */ |
784 | out += snprintf(db->buf + out, db->len - out, | 738 | out += snprintf(buf + out, len - out, |
785 | "Domain: %s Key: 0x%08x Protocol: %d.%d\n", | 739 | "Domain: %s Key: 0x%08x Protocol: %d.%d\n", |
786 | dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major, | 740 | dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major, |
787 | dlm->dlm_locking_proto.pv_minor); | 741 | dlm->dlm_locking_proto.pv_minor); |
788 | 742 | ||
789 | /* Thread Pid: xxx Node: xxx State: xxxxx */ | 743 | /* Thread Pid: xxx Node: xxx State: xxxxx */ |
790 | out += snprintf(db->buf + out, db->len - out, | 744 | out += snprintf(buf + out, len - out, |
791 | "Thread Pid: %d Node: %d State: %s\n", | 745 | "Thread Pid: %d Node: %d State: %s\n", |
792 | dlm->dlm_thread_task->pid, dlm->node_num, state); | 746 | task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state); |
793 | 747 | ||
794 | /* Number of Joins: xxx Joining Node: xxx */ | 748 | /* Number of Joins: xxx Joining Node: xxx */ |
795 | out += snprintf(db->buf + out, db->len - out, | 749 | out += snprintf(buf + out, len - out, |
796 | "Number of Joins: %d Joining Node: %d\n", | 750 | "Number of Joins: %d Joining Node: %d\n", |
797 | dlm->num_joins, dlm->joining_node); | 751 | dlm->num_joins, dlm->joining_node); |
798 | 752 | ||
799 | /* Domain Map: xx xx xx */ | 753 | /* Domain Map: xx xx xx */ |
800 | out += snprintf(db->buf + out, db->len - out, "Domain Map: "); | 754 | out += snprintf(buf + out, len - out, "Domain Map: "); |
801 | out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES, | 755 | out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES, |
802 | db->buf + out, db->len - out); | 756 | buf + out, len - out); |
803 | out += snprintf(db->buf + out, db->len - out, "\n"); | 757 | out += snprintf(buf + out, len - out, "\n"); |
804 | 758 | ||
805 | /* Live Map: xx xx xx */ | 759 | /* Live Map: xx xx xx */ |
806 | out += snprintf(db->buf + out, db->len - out, "Live Map: "); | 760 | out += snprintf(buf + out, len - out, "Live Map: "); |
807 | out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, | 761 | out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, |
808 | db->buf + out, db->len - out); | 762 | buf + out, len - out); |
809 | out += snprintf(db->buf + out, db->len - out, "\n"); | 763 | out += snprintf(buf + out, len - out, "\n"); |
810 | 764 | ||
811 | /* Lock Resources: xxx (xxx) */ | 765 | /* Lock Resources: xxx (xxx) */ |
812 | out += snprintf(db->buf + out, db->len - out, | 766 | out += snprintf(buf + out, len - out, |
813 | "Lock Resources: %d (%d)\n", | 767 | "Lock Resources: %d (%d)\n", |
814 | atomic_read(&dlm->res_cur_count), | 768 | atomic_read(&dlm->res_cur_count), |
815 | atomic_read(&dlm->res_tot_count)); | 769 | atomic_read(&dlm->res_tot_count)); |
@@ -821,29 +775,29 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
821 | cur_mles += atomic_read(&dlm->mle_cur_count[i]); | 775 | cur_mles += atomic_read(&dlm->mle_cur_count[i]); |
822 | 776 | ||
823 | /* MLEs: xxx (xxx) */ | 777 | /* MLEs: xxx (xxx) */ |
824 | out += snprintf(db->buf + out, db->len - out, | 778 | out += snprintf(buf + out, len - out, |
825 | "MLEs: %d (%d)\n", cur_mles, tot_mles); | 779 | "MLEs: %d (%d)\n", cur_mles, tot_mles); |
826 | 780 | ||
827 | /* Blocking: xxx (xxx) */ | 781 | /* Blocking: xxx (xxx) */ |
828 | out += snprintf(db->buf + out, db->len - out, | 782 | out += snprintf(buf + out, len - out, |
829 | " Blocking: %d (%d)\n", | 783 | " Blocking: %d (%d)\n", |
830 | atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]), | 784 | atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]), |
831 | atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK])); | 785 | atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK])); |
832 | 786 | ||
833 | /* Mastery: xxx (xxx) */ | 787 | /* Mastery: xxx (xxx) */ |
834 | out += snprintf(db->buf + out, db->len - out, | 788 | out += snprintf(buf + out, len - out, |
835 | " Mastery: %d (%d)\n", | 789 | " Mastery: %d (%d)\n", |
836 | atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]), | 790 | atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]), |
837 | atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER])); | 791 | atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER])); |
838 | 792 | ||
839 | /* Migration: xxx (xxx) */ | 793 | /* Migration: xxx (xxx) */ |
840 | out += snprintf(db->buf + out, db->len - out, | 794 | out += snprintf(buf + out, len - out, |
841 | " Migration: %d (%d)\n", | 795 | " Migration: %d (%d)\n", |
842 | atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]), | 796 | atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]), |
843 | atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION])); | 797 | atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION])); |
844 | 798 | ||
845 | /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */ | 799 | /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */ |
846 | out += snprintf(db->buf + out, db->len - out, | 800 | out += snprintf(buf + out, len - out, |
847 | "Lists: Dirty=%s Purge=%s PendingASTs=%s " | 801 | "Lists: Dirty=%s Purge=%s PendingASTs=%s " |
848 | "PendingBASTs=%s\n", | 802 | "PendingBASTs=%s\n", |
849 | (list_empty(&dlm->dirty_list) ? "Empty" : "InUse"), | 803 | (list_empty(&dlm->dirty_list) ? "Empty" : "InUse"), |
@@ -852,12 +806,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
852 | (list_empty(&dlm->pending_basts) ? "Empty" : "InUse")); | 806 | (list_empty(&dlm->pending_basts) ? "Empty" : "InUse")); |
853 | 807 | ||
854 | /* Purge Count: xxx Refs: xxx */ | 808 | /* Purge Count: xxx Refs: xxx */ |
855 | out += snprintf(db->buf + out, db->len - out, | 809 | out += snprintf(buf + out, len - out, |
856 | "Purge Count: %d Refs: %d\n", dlm->purge_count, | 810 | "Purge Count: %d Refs: %d\n", dlm->purge_count, |
857 | atomic_read(&dlm->dlm_refs.refcount)); | 811 | atomic_read(&dlm->dlm_refs.refcount)); |
858 | 812 | ||
859 | /* Dead Node: xxx */ | 813 | /* Dead Node: xxx */ |
860 | out += snprintf(db->buf + out, db->len - out, | 814 | out += snprintf(buf + out, len - out, |
861 | "Dead Node: %d\n", dlm->reco.dead_node); | 815 | "Dead Node: %d\n", dlm->reco.dead_node); |
862 | 816 | ||
863 | /* What about DLM_RECO_STATE_FINALIZE? */ | 817 | /* What about DLM_RECO_STATE_FINALIZE? */ |
@@ -867,19 +821,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
867 | state = "INACTIVE"; | 821 | state = "INACTIVE"; |
868 | 822 | ||
869 | /* Recovery Pid: xxxx Master: xxx State: xxxx */ | 823 | /* Recovery Pid: xxxx Master: xxx State: xxxx */ |
870 | out += snprintf(db->buf + out, db->len - out, | 824 | out += snprintf(buf + out, len - out, |
871 | "Recovery Pid: %d Master: %d State: %s\n", | 825 | "Recovery Pid: %d Master: %d State: %s\n", |
872 | dlm->dlm_reco_thread_task->pid, | 826 | task_pid_nr(dlm->dlm_reco_thread_task), |
873 | dlm->reco.new_master, state); | 827 | dlm->reco.new_master, state); |
874 | 828 | ||
875 | /* Recovery Map: xx xx */ | 829 | /* Recovery Map: xx xx */ |
876 | out += snprintf(db->buf + out, db->len - out, "Recovery Map: "); | 830 | out += snprintf(buf + out, len - out, "Recovery Map: "); |
877 | out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES, | 831 | out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES, |
878 | db->buf + out, db->len - out); | 832 | buf + out, len - out); |
879 | out += snprintf(db->buf + out, db->len - out, "\n"); | 833 | out += snprintf(buf + out, len - out, "\n"); |
880 | 834 | ||
881 | /* Recovery Node State: */ | 835 | /* Recovery Node State: */ |
882 | out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n"); | 836 | out += snprintf(buf + out, len - out, "Recovery Node State:\n"); |
883 | list_for_each_entry(node, &dlm->reco.node_data, list) { | 837 | list_for_each_entry(node, &dlm->reco.node_data, list) { |
884 | switch (node->state) { | 838 | switch (node->state) { |
885 | case DLM_RECO_NODE_DATA_INIT: | 839 | case DLM_RECO_NODE_DATA_INIT: |
@@ -907,7 +861,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
907 | state = "BAD"; | 861 | state = "BAD"; |
908 | break; | 862 | break; |
909 | } | 863 | } |
910 | out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n", | 864 | out += snprintf(buf + out, len - out, "\t%u - %s\n", |
911 | node->node_num, state); | 865 | node->node_num, state); |
912 | } | 866 | } |
913 | 867 | ||
@@ -919,15 +873,15 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) | |||
919 | static int debug_state_open(struct inode *inode, struct file *file) | 873 | static int debug_state_open(struct inode *inode, struct file *file) |
920 | { | 874 | { |
921 | struct dlm_ctxt *dlm = inode->i_private; | 875 | struct dlm_ctxt *dlm = inode->i_private; |
922 | struct debug_buffer *db = NULL; | 876 | char *buf = NULL; |
923 | 877 | ||
924 | db = debug_buffer_allocate(); | 878 | buf = (char *) get_zeroed_page(GFP_NOFS); |
925 | if (!db) | 879 | if (!buf) |
926 | goto bail; | 880 | goto bail; |
927 | 881 | ||
928 | db->len = debug_state_print(dlm, db); | 882 | i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1)); |
929 | 883 | ||
930 | file->private_data = db; | 884 | file->private_data = buf; |
931 | 885 | ||
932 | return 0; | 886 | return 0; |
933 | bail: | 887 | bail: |
@@ -936,9 +890,9 @@ bail: | |||
936 | 890 | ||
937 | static const struct file_operations debug_state_fops = { | 891 | static const struct file_operations debug_state_fops = { |
938 | .open = debug_state_open, | 892 | .open = debug_state_open, |
939 | .release = debug_buffer_release, | 893 | .release = debug_release, |
940 | .read = debug_buffer_read, | 894 | .read = debug_read, |
941 | .llseek = debug_buffer_llseek, | 895 | .llseek = generic_file_llseek, |
942 | }; | 896 | }; |
943 | /* end - debug state funcs */ | 897 | /* end - debug state funcs */ |
944 | 898 | ||
@@ -1002,14 +956,10 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm) | |||
1002 | struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt; | 956 | struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt; |
1003 | 957 | ||
1004 | if (dc) { | 958 | if (dc) { |
1005 | if (dc->debug_purgelist_dentry) | 959 | debugfs_remove(dc->debug_purgelist_dentry); |
1006 | debugfs_remove(dc->debug_purgelist_dentry); | 960 | debugfs_remove(dc->debug_mle_dentry); |
1007 | if (dc->debug_mle_dentry) | 961 | debugfs_remove(dc->debug_lockres_dentry); |
1008 | debugfs_remove(dc->debug_mle_dentry); | 962 | debugfs_remove(dc->debug_state_dentry); |
1009 | if (dc->debug_lockres_dentry) | ||
1010 | debugfs_remove(dc->debug_lockres_dentry); | ||
1011 | if (dc->debug_state_dentry) | ||
1012 | debugfs_remove(dc->debug_state_dentry); | ||
1013 | dlm_debug_put(dc); | 963 | dlm_debug_put(dc); |
1014 | } | 964 | } |
1015 | } | 965 | } |
@@ -1040,8 +990,7 @@ bail: | |||
1040 | 990 | ||
1041 | void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm) | 991 | void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm) |
1042 | { | 992 | { |
1043 | if (dlm->dlm_debugfs_subroot) | 993 | debugfs_remove(dlm->dlm_debugfs_subroot); |
1044 | debugfs_remove(dlm->dlm_debugfs_subroot); | ||
1045 | } | 994 | } |
1046 | 995 | ||
1047 | /* debugfs root */ | 996 | /* debugfs root */ |
@@ -1057,7 +1006,6 @@ int dlm_create_debugfs_root(void) | |||
1057 | 1006 | ||
1058 | void dlm_destroy_debugfs_root(void) | 1007 | void dlm_destroy_debugfs_root(void) |
1059 | { | 1008 | { |
1060 | if (dlm_debugfs_root) | 1009 | debugfs_remove(dlm_debugfs_root); |
1061 | debugfs_remove(dlm_debugfs_root); | ||
1062 | } | 1010 | } |
1063 | #endif /* CONFIG_DEBUG_FS */ | 1011 | #endif /* CONFIG_DEBUG_FS */ |
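[Editor's note] The dlmdebug.c rewrite above drops the intermediate struct debug_buffer: open() renders the whole report into one zeroed page, records the rendered length as the inode size, read() is a plain simple_read_from_buffer() against file->private_data, llseek falls back to generic_file_llseek(), and release() frees the page. Below is a userspace analogue of that snapshot-at-open idea; the names and buffer size are illustrative, not the kernel API.

/* Userspace analogue of the snapshot-at-open pattern above: render once,
 * remember the length, serve reads from the snapshot, free on close. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SNAP_SIZE 4096

struct snap { char *buf; size_t len; };

static struct snap *snap_open(int purge_count, int dead_node)
{
    struct snap *s = calloc(1, sizeof(*s));
    if (!s)
        return NULL;
    s->buf = calloc(1, SNAP_SIZE);
    if (!s->buf) {
        free(s);
        return NULL;
    }
    /* Render the whole report once; later reads never re-render. */
    s->len = snprintf(s->buf, SNAP_SIZE - 1,
                      "Purge Count: %d\nDead Node: %d\n",
                      purge_count, dead_node);
    return s;
}

static size_t snap_read(struct snap *s, char *out, size_t n, size_t *pos)
{
    size_t left = (*pos < s->len) ? s->len - *pos : 0;
    size_t take = n < left ? n : left;
    memcpy(out, s->buf + *pos, take);
    *pos += take;
    return take;
}

static void snap_release(struct snap *s)
{
    free(s->buf);
    free(s);
}

int main(void)
{
    char chunk[16];
    size_t pos = 0, n;
    struct snap *s = snap_open(3, 255);
    while (s && (n = snap_read(s, chunk, sizeof(chunk), &pos)) > 0)
        fwrite(chunk, 1, n, stdout);
    if (s)
        snap_release(s);
    return 0;
}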
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h index 8c686d22f9c7..1f27c4812d1a 100644 --- a/fs/ocfs2/dlm/dlmdebug.h +++ b/fs/ocfs2/dlm/dlmdebug.h | |||
@@ -37,11 +37,6 @@ struct dlm_debug_ctxt { | |||
37 | struct dentry *debug_purgelist_dentry; | 37 | struct dentry *debug_purgelist_dentry; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | struct debug_buffer { | ||
41 | int len; | ||
42 | char *buf; | ||
43 | }; | ||
44 | |||
45 | struct debug_lockres { | 40 | struct debug_lockres { |
46 | int dl_len; | 41 | int dl_len; |
47 | char *dl_buf; | 42 | char *dl_buf; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index cc2aaa96cfe5..7e38a072d720 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -460,8 +460,6 @@ redo_bucket: | |||
460 | } | 460 | } |
461 | cond_resched_lock(&dlm->spinlock); | 461 | cond_resched_lock(&dlm->spinlock); |
462 | num += n; | 462 | num += n; |
463 | mlog(0, "%s: touched %d lockreses in bucket %d " | ||
464 | "(tot=%d)\n", dlm->name, n, i, num); | ||
465 | } | 463 | } |
466 | spin_unlock(&dlm->spinlock); | 464 | spin_unlock(&dlm->spinlock); |
467 | wake_up(&dlm->dlm_thread_wq); | 465 | wake_up(&dlm->dlm_thread_wq); |
@@ -1661,8 +1659,8 @@ bail: | |||
1661 | 1659 | ||
1662 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) | 1660 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) |
1663 | { | 1661 | { |
1664 | o2hb_unregister_callback(NULL, &dlm->dlm_hb_up); | 1662 | o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up); |
1665 | o2hb_unregister_callback(NULL, &dlm->dlm_hb_down); | 1663 | o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down); |
1666 | o2net_unregister_handler_list(&dlm->dlm_domain_handlers); | 1664 | o2net_unregister_handler_list(&dlm->dlm_domain_handlers); |
1667 | } | 1665 | } |
1668 | 1666 | ||
@@ -1674,13 +1672,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) | |||
1674 | 1672 | ||
1675 | o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, | 1673 | o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, |
1676 | dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); | 1674 | dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); |
1677 | status = o2hb_register_callback(NULL, &dlm->dlm_hb_down); | 1675 | status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down); |
1678 | if (status) | 1676 | if (status) |
1679 | goto bail; | 1677 | goto bail; |
1680 | 1678 | ||
1681 | o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, | 1679 | o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, |
1682 | dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); | 1680 | dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); |
1683 | status = o2hb_register_callback(NULL, &dlm->dlm_hb_up); | 1681 | status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up); |
1684 | if (status) | 1682 | if (status) |
1685 | goto bail; | 1683 | goto bail; |
1686 | 1684 | ||
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 69cf369961c4..7009292aac5a 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -106,6 +106,9 @@ static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, | |||
106 | 106 | ||
107 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | 107 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) |
108 | return 0; | 108 | return 0; |
109 | if (!dlm_lock_compatible(tmplock->ml.convert_type, | ||
110 | lock->ml.type)) | ||
111 | return 0; | ||
109 | } | 112 | } |
110 | 113 | ||
111 | return 1; | 114 | return 1; |
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 2211acf33d9b..1d6d1d22c471 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -122,15 +122,13 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res) | |||
122 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | 122 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, |
123 | struct dlm_lock_resource *res) | 123 | struct dlm_lock_resource *res) |
124 | { | 124 | { |
125 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
126 | |||
127 | assert_spin_locked(&dlm->spinlock); | 125 | assert_spin_locked(&dlm->spinlock); |
128 | assert_spin_locked(&res->spinlock); | 126 | assert_spin_locked(&res->spinlock); |
129 | 127 | ||
130 | if (__dlm_lockres_unused(res)){ | 128 | if (__dlm_lockres_unused(res)){ |
131 | if (list_empty(&res->purge)) { | 129 | if (list_empty(&res->purge)) { |
132 | mlog(0, "putting lockres %.*s:%p onto purge list\n", | 130 | mlog(0, "%s: Adding res %.*s to purge list\n", |
133 | res->lockname.len, res->lockname.name, res); | 131 | dlm->name, res->lockname.len, res->lockname.name); |
134 | 132 | ||
135 | res->last_used = jiffies; | 133 | res->last_used = jiffies; |
136 | dlm_lockres_get(res); | 134 | dlm_lockres_get(res); |
@@ -138,8 +136,8 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | |||
138 | dlm->purge_count++; | 136 | dlm->purge_count++; |
139 | } | 137 | } |
140 | } else if (!list_empty(&res->purge)) { | 138 | } else if (!list_empty(&res->purge)) { |
141 | mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", | 139 | mlog(0, "%s: Removing res %.*s from purge list\n", |
142 | res->lockname.len, res->lockname.name, res, res->owner); | 140 | dlm->name, res->lockname.len, res->lockname.name); |
143 | 141 | ||
144 | list_del_init(&res->purge); | 142 | list_del_init(&res->purge); |
145 | dlm_lockres_put(res); | 143 | dlm_lockres_put(res); |
@@ -150,7 +148,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | |||
150 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | 148 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, |
151 | struct dlm_lock_resource *res) | 149 | struct dlm_lock_resource *res) |
152 | { | 150 | { |
153 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
154 | spin_lock(&dlm->spinlock); | 151 | spin_lock(&dlm->spinlock); |
155 | spin_lock(&res->spinlock); | 152 | spin_lock(&res->spinlock); |
156 | 153 | ||
@@ -171,9 +168,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
171 | 168 | ||
172 | master = (res->owner == dlm->node_num); | 169 | master = (res->owner == dlm->node_num); |
173 | 170 | ||
174 | 171 | mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name, | |
175 | mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, | 172 | res->lockname.len, res->lockname.name, master); |
176 | res->lockname.name, master); | ||
177 | 173 | ||
178 | if (!master) { | 174 | if (!master) { |
179 | res->state |= DLM_LOCK_RES_DROPPING_REF; | 175 | res->state |= DLM_LOCK_RES_DROPPING_REF; |
@@ -189,27 +185,25 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, | |||
189 | /* clear our bit from the master's refmap, ignore errors */ | 185 | /* clear our bit from the master's refmap, ignore errors */ |
190 | ret = dlm_drop_lockres_ref(dlm, res); | 186 | ret = dlm_drop_lockres_ref(dlm, res); |
191 | if (ret < 0) { | 187 | if (ret < 0) { |
192 | mlog_errno(ret); | 188 | mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name, |
189 | res->lockname.len, res->lockname.name, ret); | ||
193 | if (!dlm_is_host_down(ret)) | 190 | if (!dlm_is_host_down(ret)) |
194 | BUG(); | 191 | BUG(); |
195 | } | 192 | } |
196 | mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", | ||
197 | dlm->name, res->lockname.len, res->lockname.name, ret); | ||
198 | spin_lock(&dlm->spinlock); | 193 | spin_lock(&dlm->spinlock); |
199 | spin_lock(&res->spinlock); | 194 | spin_lock(&res->spinlock); |
200 | } | 195 | } |
201 | 196 | ||
202 | if (!list_empty(&res->purge)) { | 197 | if (!list_empty(&res->purge)) { |
203 | mlog(0, "removing lockres %.*s:%p from purgelist, " | 198 | mlog(0, "%s: Removing res %.*s from purgelist, master %d\n", |
204 | "master = %d\n", res->lockname.len, res->lockname.name, | 199 | dlm->name, res->lockname.len, res->lockname.name, master); |
205 | res, master); | ||
206 | list_del_init(&res->purge); | 200 | list_del_init(&res->purge); |
207 | dlm_lockres_put(res); | 201 | dlm_lockres_put(res); |
208 | dlm->purge_count--; | 202 | dlm->purge_count--; |
209 | } | 203 | } |
210 | 204 | ||
211 | if (!__dlm_lockres_unused(res)) { | 205 | if (!__dlm_lockres_unused(res)) { |
212 | mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n", | 206 | mlog(ML_ERROR, "%s: res %.*s in use after deref\n", |
213 | dlm->name, res->lockname.len, res->lockname.name); | 207 | dlm->name, res->lockname.len, res->lockname.name); |
214 | __dlm_print_one_lock_resource(res); | 208 | __dlm_print_one_lock_resource(res); |
215 | BUG(); | 209 | BUG(); |
@@ -266,10 +260,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, | |||
266 | unused = __dlm_lockres_unused(lockres); | 260 | unused = __dlm_lockres_unused(lockres); |
267 | if (!unused || | 261 | if (!unused || |
268 | (lockres->state & DLM_LOCK_RES_MIGRATING)) { | 262 | (lockres->state & DLM_LOCK_RES_MIGRATING)) { |
269 | mlog(0, "lockres %s:%.*s: is in use or " | 263 | mlog(0, "%s: res %.*s is in use or being remastered, " |
270 | "being remastered, used %d, state %d\n", | 264 | "used %d, state %d\n", dlm->name, |
271 | dlm->name, lockres->lockname.len, | 265 | lockres->lockname.len, lockres->lockname.name, |
272 | lockres->lockname.name, !unused, lockres->state); | 266 | !unused, lockres->state); |
273 | list_move_tail(&dlm->purge_list, &lockres->purge); | 267 | list_move_tail(&dlm->purge_list, &lockres->purge); |
274 | spin_unlock(&lockres->spinlock); | 268 | spin_unlock(&lockres->spinlock); |
275 | continue; | 269 | continue; |
@@ -296,15 +290,12 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm, | |||
296 | struct list_head *head; | 290 | struct list_head *head; |
297 | int can_grant = 1; | 291 | int can_grant = 1; |
298 | 292 | ||
299 | //mlog(0, "res->lockname.len=%d\n", res->lockname.len); | 293 | /* |
300 | //mlog(0, "res->lockname.name=%p\n", res->lockname.name); | 294 | * Because this function is called with the lockres |
301 | //mlog(0, "shuffle res %.*s\n", res->lockname.len, | ||
302 | // res->lockname.name); | ||
303 | |||
304 | /* because this function is called with the lockres | ||
305 | * spinlock, and because we know that it is not migrating/ | 295 | * spinlock, and because we know that it is not migrating/ |
306 | * recovering/in-progress, it is fine to reserve asts and | 296 | * recovering/in-progress, it is fine to reserve asts and |
307 | * basts right before queueing them all throughout */ | 297 | * basts right before queueing them all throughout |
298 | */ | ||
308 | assert_spin_locked(&dlm->ast_lock); | 299 | assert_spin_locked(&dlm->ast_lock); |
309 | assert_spin_locked(&res->spinlock); | 300 | assert_spin_locked(&res->spinlock); |
310 | BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| | 301 | BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| |
@@ -314,13 +305,13 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm, | |||
314 | converting: | 305 | converting: |
315 | if (list_empty(&res->converting)) | 306 | if (list_empty(&res->converting)) |
316 | goto blocked; | 307 | goto blocked; |
317 | mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, | 308 | mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name, |
318 | res->lockname.name); | 309 | res->lockname.len, res->lockname.name); |
319 | 310 | ||
320 | target = list_entry(res->converting.next, struct dlm_lock, list); | 311 | target = list_entry(res->converting.next, struct dlm_lock, list); |
321 | if (target->ml.convert_type == LKM_IVMODE) { | 312 | if (target->ml.convert_type == LKM_IVMODE) { |
322 | mlog(ML_ERROR, "%.*s: converting a lock with no " | 313 | mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n", |
323 | "convert_type!\n", res->lockname.len, res->lockname.name); | 314 | dlm->name, res->lockname.len, res->lockname.name); |
324 | BUG(); | 315 | BUG(); |
325 | } | 316 | } |
326 | head = &res->granted; | 317 | head = &res->granted; |
@@ -365,9 +356,12 @@ converting: | |||
365 | spin_lock(&target->spinlock); | 356 | spin_lock(&target->spinlock); |
366 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | 357 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); |
367 | 358 | ||
368 | mlog(0, "calling ast for converting lock: %.*s, have: %d, " | 359 | mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type " |
369 | "granting: %d, node: %u\n", res->lockname.len, | 360 | "%d => %d, node %u\n", dlm->name, res->lockname.len, |
370 | res->lockname.name, target->ml.type, | 361 | res->lockname.name, |
362 | dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), | ||
363 | dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), | ||
364 | target->ml.type, | ||
371 | target->ml.convert_type, target->ml.node); | 365 | target->ml.convert_type, target->ml.node); |
372 | 366 | ||
373 | target->ml.type = target->ml.convert_type; | 367 | target->ml.type = target->ml.convert_type; |
@@ -428,11 +422,14 @@ blocked: | |||
428 | spin_lock(&target->spinlock); | 422 | spin_lock(&target->spinlock); |
429 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | 423 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); |
430 | 424 | ||
431 | mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " | 425 | mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, " |
432 | "node: %u\n", res->lockname.len, res->lockname.name, | 426 | "node %u\n", dlm->name, res->lockname.len, |
427 | res->lockname.name, | ||
428 | dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), | ||
429 | dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), | ||
433 | target->ml.type, target->ml.node); | 430 | target->ml.type, target->ml.node); |
434 | 431 | ||
435 | // target->ml.type is already correct | 432 | /* target->ml.type is already correct */ |
436 | list_move_tail(&target->list, &res->granted); | 433 | list_move_tail(&target->list, &res->granted); |
437 | 434 | ||
438 | BUG_ON(!target->lksb); | 435 | BUG_ON(!target->lksb); |
@@ -453,7 +450,6 @@ leave: | |||
453 | /* must have NO locks when calling this with res !=NULL * */ | 450 | /* must have NO locks when calling this with res !=NULL * */ |
454 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | 451 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
455 | { | 452 | { |
456 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
457 | if (res) { | 453 | if (res) { |
458 | spin_lock(&dlm->spinlock); | 454 | spin_lock(&dlm->spinlock); |
459 | spin_lock(&res->spinlock); | 455 | spin_lock(&res->spinlock); |
@@ -466,8 +462,6 @@ void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
466 | 462 | ||
467 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | 463 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
468 | { | 464 | { |
469 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
470 | |||
471 | assert_spin_locked(&dlm->spinlock); | 465 | assert_spin_locked(&dlm->spinlock); |
472 | assert_spin_locked(&res->spinlock); | 466 | assert_spin_locked(&res->spinlock); |
473 | 467 | ||
@@ -484,13 +478,16 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |||
484 | res->state |= DLM_LOCK_RES_DIRTY; | 478 | res->state |= DLM_LOCK_RES_DIRTY; |
485 | } | 479 | } |
486 | } | 480 | } |
481 | |||
482 | mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, | ||
483 | res->lockname.name); | ||
487 | } | 484 | } |
488 | 485 | ||
489 | 486 | ||
490 | /* Launch the NM thread for the mounted volume */ | 487 | /* Launch the NM thread for the mounted volume */ |
491 | int dlm_launch_thread(struct dlm_ctxt *dlm) | 488 | int dlm_launch_thread(struct dlm_ctxt *dlm) |
492 | { | 489 | { |
493 | mlog(0, "starting dlm thread...\n"); | 490 | mlog(0, "Starting dlm_thread...\n"); |
494 | 491 | ||
495 | dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); | 492 | dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); |
496 | if (IS_ERR(dlm->dlm_thread_task)) { | 493 | if (IS_ERR(dlm->dlm_thread_task)) { |
@@ -505,7 +502,7 @@ int dlm_launch_thread(struct dlm_ctxt *dlm) | |||
505 | void dlm_complete_thread(struct dlm_ctxt *dlm) | 502 | void dlm_complete_thread(struct dlm_ctxt *dlm) |
506 | { | 503 | { |
507 | if (dlm->dlm_thread_task) { | 504 | if (dlm->dlm_thread_task) { |
508 | mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); | 505 | mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n"); |
509 | kthread_stop(dlm->dlm_thread_task); | 506 | kthread_stop(dlm->dlm_thread_task); |
510 | dlm->dlm_thread_task = NULL; | 507 | dlm->dlm_thread_task = NULL; |
511 | } | 508 | } |
@@ -536,7 +533,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) | |||
536 | /* get an extra ref on lock */ | 533 | /* get an extra ref on lock */ |
537 | dlm_lock_get(lock); | 534 | dlm_lock_get(lock); |
538 | res = lock->lockres; | 535 | res = lock->lockres; |
539 | mlog(0, "delivering an ast for this lockres\n"); | 536 | mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, " |
537 | "node %u\n", dlm->name, res->lockname.len, | ||
538 | res->lockname.name, | ||
539 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
540 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | ||
541 | lock->ml.type, lock->ml.node); | ||
540 | 542 | ||
541 | BUG_ON(!lock->ast_pending); | 543 | BUG_ON(!lock->ast_pending); |
542 | 544 | ||
@@ -557,9 +559,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) | |||
557 | /* possible that another ast was queued while | 559 | /* possible that another ast was queued while |
558 | * we were delivering the last one */ | 560 | * we were delivering the last one */ |
559 | if (!list_empty(&lock->ast_list)) { | 561 | if (!list_empty(&lock->ast_list)) { |
560 | mlog(0, "aha another ast got queued while " | 562 | mlog(0, "%s: res %.*s, AST queued while flushing last " |
561 | "we were finishing the last one. will " | 563 | "one\n", dlm->name, res->lockname.len, |
562 | "keep the ast_pending flag set.\n"); | 564 | res->lockname.name); |
563 | } else | 565 | } else |
564 | lock->ast_pending = 0; | 566 | lock->ast_pending = 0; |
565 | 567 | ||
@@ -590,8 +592,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) | |||
590 | dlm_lock_put(lock); | 592 | dlm_lock_put(lock); |
591 | spin_unlock(&dlm->ast_lock); | 593 | spin_unlock(&dlm->ast_lock); |
592 | 594 | ||
593 | mlog(0, "delivering a bast for this lockres " | 595 | mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, " |
594 | "(blocked = %d\n", hi); | 596 | "blocked %d, node %u\n", |
597 | dlm->name, res->lockname.len, res->lockname.name, | ||
598 | dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), | ||
599 | dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), | ||
600 | hi, lock->ml.node); | ||
595 | 601 | ||
596 | if (lock->ml.node != dlm->node_num) { | 602 | if (lock->ml.node != dlm->node_num) { |
597 | ret = dlm_send_proxy_bast(dlm, res, lock, hi); | 603 | ret = dlm_send_proxy_bast(dlm, res, lock, hi); |
@@ -605,9 +611,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) | |||
605 | /* possible that another bast was queued while | 611 | /* possible that another bast was queued while |
606 | * we were delivering the last one */ | 612 | * we were delivering the last one */ |
607 | if (!list_empty(&lock->bast_list)) { | 613 | if (!list_empty(&lock->bast_list)) { |
608 | mlog(0, "aha another bast got queued while " | 614 | mlog(0, "%s: res %.*s, BAST queued while flushing last " |
609 | "we were finishing the last one. will " | 615 | "one\n", dlm->name, res->lockname.len, |
610 | "keep the bast_pending flag set.\n"); | 616 | res->lockname.name); |
611 | } else | 617 | } else |
612 | lock->bast_pending = 0; | 618 | lock->bast_pending = 0; |
613 | 619 | ||
@@ -675,11 +681,12 @@ static int dlm_thread(void *data) | |||
675 | spin_lock(&res->spinlock); | 681 | spin_lock(&res->spinlock); |
676 | if (res->owner != dlm->node_num) { | 682 | if (res->owner != dlm->node_num) { |
677 | __dlm_print_one_lock_resource(res); | 683 | __dlm_print_one_lock_resource(res); |
678 | mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", | 684 | mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d," |
679 | res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", | 685 | " dirty %d\n", dlm->name, |
680 | res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", | 686 | !!(res->state & DLM_LOCK_RES_IN_PROGRESS), |
681 | res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", | 687 | !!(res->state & DLM_LOCK_RES_MIGRATING), |
682 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | 688 | !!(res->state & DLM_LOCK_RES_RECOVERING), |
689 | !!(res->state & DLM_LOCK_RES_DIRTY)); | ||
683 | } | 690 | } |
684 | BUG_ON(res->owner != dlm->node_num); | 691 | BUG_ON(res->owner != dlm->node_num); |
685 | 692 | ||
@@ -693,8 +700,8 @@ static int dlm_thread(void *data) | |||
693 | res->state &= ~DLM_LOCK_RES_DIRTY; | 700 | res->state &= ~DLM_LOCK_RES_DIRTY; |
694 | spin_unlock(&res->spinlock); | 701 | spin_unlock(&res->spinlock); |
695 | spin_unlock(&dlm->ast_lock); | 702 | spin_unlock(&dlm->ast_lock); |
696 | mlog(0, "delaying list shuffling for in-" | 703 | mlog(0, "%s: res %.*s, inprogress, delay list " |
697 | "progress lockres %.*s, state=%d\n", | 704 | "shuffle, state %d\n", dlm->name, |
698 | res->lockname.len, res->lockname.name, | 705 | res->lockname.len, res->lockname.name, |
699 | res->state); | 706 | res->state); |
700 | delay = 1; | 707 | delay = 1; |
@@ -706,10 +713,6 @@ static int dlm_thread(void *data) | |||
706 | * spinlock and do NOT have the dlm lock. | 713 | * spinlock and do NOT have the dlm lock. |
707 | * safe to reserve/queue asts and run the lists. */ | 714 | * safe to reserve/queue asts and run the lists. */ |
708 | 715 | ||
709 | mlog(0, "calling dlm_shuffle_lists with dlm=%s, " | ||
710 | "res=%.*s\n", dlm->name, | ||
711 | res->lockname.len, res->lockname.name); | ||
712 | |||
713 | /* called while holding lockres lock */ | 716 | /* called while holding lockres lock */ |
714 | dlm_shuffle_lists(dlm, res); | 717 | dlm_shuffle_lists(dlm, res); |
715 | res->state &= ~DLM_LOCK_RES_DIRTY; | 718 | res->state &= ~DLM_LOCK_RES_DIRTY; |
@@ -733,7 +736,8 @@ in_progress: | |||
733 | /* unlikely, but we may need to give time to | 736 | /* unlikely, but we may need to give time to |
734 | * other tasks */ | 737 | * other tasks */ |
735 | if (!--n) { | 738 | if (!--n) { |
736 | mlog(0, "throttling dlm_thread\n"); | 739 | mlog(0, "%s: Throttling dlm thread\n", |
740 | dlm->name); | ||
737 | break; | 741 | break; |
738 | } | 742 | } |
739 | } | 743 | } |
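The dlmthread.c changes above converge the mlog() messages on a single "%s: res %.*s" prefix (domain name plus lock resource name) and, on the AST/BAST paths, identify the lock itself by printing its cookie as "node:sequence" via dlm_get_lock_cookie_node()/dlm_get_lock_cookie_seq(). A minimal sketch of those decoders follows, assuming the usual ocfs2/dlm cookie layout (node number in the top 8 bits, per-node sequence in the low 56 bits); the authoritative definitions live in fs/ocfs2/dlm/dlmcommon.h:

#include <linux/types.h>

/* Sketch only: decode the cpu-endian lock cookie the way the %u:%llu
 * format strings above expect.  Assumes the top byte carries the node
 * number and the remaining 56 bits a per-node sequence counter. */
static inline u8 dlm_get_lock_cookie_node(u64 cookie)
{
	return (u8)((cookie >> 56) & 0xffULL);
}

static inline unsigned long long dlm_get_lock_cookie_seq(u64 cookie)
{
	return (unsigned long long)(cookie & 0x00ffffffffffffffULL);
}

Note that the hunks call these on be64_to_cpu(lock->ml.cookie): the cookie is stored big-endian in struct dlm_lock (it travels on the wire) and is converted to host order before being decoded for the log message.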
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index d14cad6e2e41..30c523144452 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -1017,8 +1017,11 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, | |||
1017 | * An error return must mean that no cluster locks | 1017 | * An error return must mean that no cluster locks |
1018 | * were held on function exit. | 1018 | * were held on function exit. |
1019 | */ | 1019 | */ |
1020 | if (oi1->ip_blkno != oi2->ip_blkno) | 1020 | if (oi1->ip_blkno != oi2->ip_blkno) { |
1021 | ocfs2_inode_unlock(inode2, 1); | 1021 | ocfs2_inode_unlock(inode2, 1); |
1022 | brelse(*bh2); | ||
1023 | *bh2 = NULL; | ||
1024 | } | ||
1022 | 1025 | ||
1023 | if (status != -ENOENT) | 1026 | if (status != -ENOENT) |
1024 | mlog_errno(status); | 1027 | mlog_errno(status); |
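The namei.c hunk plugs a leak in ocfs2_double_lock()'s error path: when the cluster lock taken on the second inode has to be dropped, the buffer_head that was read under that lock is now released as well, and the caller's pointer is cleared so it cannot be reused or double-freed later. A minimal sketch of the pattern, with my_cluster_unlock() as a hypothetical stand-in for the real ocfs2_inode_unlock() call:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helper standing in for the ocfs2 cluster-unlock call. */
extern void my_cluster_unlock(struct inode *inode);

/* Sketch of the fixed error path: undo the lock, release the metadata
 * buffer it pinned, and leave the caller with a NULL pointer instead of
 * a stale one. */
static void drop_second_lock_on_error(struct inode *inode2,
				      struct buffer_head **bh2)
{
	my_cluster_unlock(inode2);
	brelse(*bh2);	/* put the buffer_head read under the lock */
	*bh2 = NULL;	/* error return means: no locks held, no bh kept */
}

The clearing of *bh2 is the point of the fix: the function's contract (per the comment in the hunk) is that an error return leaves no cluster locks held, and it now also leaves no dangling buffer_head for the caller to trip over.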
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 70dd3b1798f1..51cd6898e7f1 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -420,6 +420,11 @@ struct ocfs2_super | |||
420 | struct inode *osb_tl_inode; | 420 | struct inode *osb_tl_inode; |
421 | struct buffer_head *osb_tl_bh; | 421 | struct buffer_head *osb_tl_bh; |
422 | struct delayed_work osb_truncate_log_wq; | 422 | struct delayed_work osb_truncate_log_wq; |
423 | /* | ||
424 | * How many clusters in our truncate log. | ||
425 | * It must be protected by osb_tl_inode->i_mutex. | ||
426 | */ | ||
427 | unsigned int truncated_clusters; | ||
423 | 428 | ||
424 | struct ocfs2_node_map osb_recovering_orphan_dirs; | 429 | struct ocfs2_node_map osb_recovering_orphan_dirs; |
425 | unsigned int *osb_orphan_wipes; | 430 | unsigned int *osb_orphan_wipes; |
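The ocfs2.h hunk adds truncated_clusters, a running count of clusters sitting in the truncate log, and the new comment pins down its locking rule: the field is only touched under the truncate-log inode's i_mutex. A minimal, hypothetical illustration of a caller following that rule (the function name is invented; osb_tl_inode is the field shown above and i_mutex is the 2011-era struct inode mutex):

#include <linux/fs.h>
#include <linux/mutex.h>
#include "ocfs2.h"

/* Hypothetical example: any update of truncated_clusters takes
 * osb_tl_inode->i_mutex first, per the comment added above. */
static void account_truncated_clusters(struct ocfs2_super *osb,
				       unsigned int clusters)
{
	mutex_lock(&osb->osb_tl_inode->i_mutex);
	osb->truncated_clusters += clusters;
	mutex_unlock(&osb->osb_tl_inode->i_mutex);
}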