summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/locking.c
diff options
context:
space:
mode:
authorQu Wenruo <wqu@suse.com>2019-04-15 09:15:24 -0400
committerDavid Sterba <dsterba@suse.com>2019-04-29 13:02:43 -0400
commit34e73cc930a8677426c9cbffdd3421e18f32e79f (patch)
treefa697652fe3dc42fc3e4afceeb047355c8710e5e /fs/btrfs/locking.c
parent74f657d89c6734c260509338e88ad6d5f5a24e1d (diff)
btrfs: trace: Introduce trace events for sleepable tree lock
There are two tree lock events which can sleep: - btrfs_tree_read_lock() - btrfs_tree_lock() Sometimes we may need to look into the concurrency picture of the fs. For that case, we need the execution time of the above two functions and the owner of @eb. Here we introduce trace events for user space tools like bcc, to get the execution time of the above two functions, and to get detailed owner info where eBPF code can't. All the overhead is hidden behind the trace events, so if events are not enabled, there is no overhead. These trace events also output bytenr and generation, allowing them to be paired with unlock events to pin down deadlocks. Signed-off-by: Qu Wenruo <wqu@suse.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
Diffstat (limited to 'fs/btrfs/locking.c')
-rw-r--r--fs/btrfs/locking.c12
1 file changed, 12 insertions, 0 deletions
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 6df03ba36026..67b77f1d113e 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -158,6 +158,10 @@ void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
158 */ 158 */
159void btrfs_tree_read_lock(struct extent_buffer *eb) 159void btrfs_tree_read_lock(struct extent_buffer *eb)
160{ 160{
161 u64 start_ns = 0;
162
163 if (trace_btrfs_tree_read_lock_enabled())
164 start_ns = ktime_get_ns();
161again: 165again:
162 BUG_ON(!atomic_read(&eb->blocking_writers) && 166 BUG_ON(!atomic_read(&eb->blocking_writers) &&
163 current->pid == eb->lock_owner); 167 current->pid == eb->lock_owner);
@@ -174,6 +178,7 @@ again:
174 BUG_ON(eb->lock_nested); 178 BUG_ON(eb->lock_nested);
175 eb->lock_nested = true; 179 eb->lock_nested = true;
176 read_unlock(&eb->lock); 180 read_unlock(&eb->lock);
181 trace_btrfs_tree_read_lock(eb, start_ns);
177 return; 182 return;
178 } 183 }
179 if (atomic_read(&eb->blocking_writers)) { 184 if (atomic_read(&eb->blocking_writers)) {
@@ -184,6 +189,7 @@ again:
184 } 189 }
185 btrfs_assert_tree_read_locks_get(eb); 190 btrfs_assert_tree_read_locks_get(eb);
186 btrfs_assert_spinning_readers_get(eb); 191 btrfs_assert_spinning_readers_get(eb);
192 trace_btrfs_tree_read_lock(eb, start_ns);
187} 193}
188 194
189/* 195/*
@@ -299,6 +305,11 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
299 */ 305 */
300void btrfs_tree_lock(struct extent_buffer *eb) 306void btrfs_tree_lock(struct extent_buffer *eb)
301{ 307{
308 u64 start_ns = 0;
309
310 if (trace_btrfs_tree_lock_enabled())
311 start_ns = ktime_get_ns();
312
302 WARN_ON(eb->lock_owner == current->pid); 313 WARN_ON(eb->lock_owner == current->pid);
303again: 314again:
304 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0); 315 wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
@@ -312,6 +323,7 @@ again:
312 btrfs_assert_spinning_writers_get(eb); 323 btrfs_assert_spinning_writers_get(eb);
313 btrfs_assert_tree_write_locks_get(eb); 324 btrfs_assert_tree_write_locks_get(eb);
314 eb->lock_owner = current->pid; 325 eb->lock_owner = current->pid;
326 trace_btrfs_tree_lock(eb, start_ns);
315} 327}
316 328
317/* 329/*