aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-02-09 16:22:03 -0500
committerChris Mason <chris.mason@oracle.com>2009-02-09 16:22:03 -0500
commit284b066af41579f62649048fdec5c5e7091703e6 (patch)
treee34185c911cb50b0ade04f804056ffbe2a6e04ae /fs/btrfs
parent42f15d77df8a7e8a2feb15041d5d30710ee7f951 (diff)
Btrfs: don't use spin_is_contended
Btrfs was using spin_is_contended to see if it should drop locks before doing extent allocations during btrfs_search_slot. The idea was to avoid expensive searches in the tree unless the lock was actually contended. But, spin_is_contended is specific to the ticket spinlocks on x86, so this is causing compile errors everywhere else. In practice, the contention could easily appear some time after we started doing the extent allocation, and it makes more sense to always drop the lock instead. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/ctree.c3
-rw-r--r--fs/btrfs/locking.c22
-rw-r--r--fs/btrfs/locking.h2
3 files changed, 1 insertion(+), 26 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 551177c0011a..35443cc4b9a9 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1530,8 +1530,7 @@ again:
 	 * for higher level blocks, try not to allocate blocks
 	 * with the block and the parent locks held.
 	 */
-	if (level > 0 && !prealloc_block.objectid &&
-	    btrfs_path_lock_waiting(p, level)) {
+	if (level > 0 && !prealloc_block.objectid) {
 		u32 size = b->len;
 		u64 hint = b->start;
 
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 68fd9ccf1805..9ebe9385129b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
 	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
 		spin_is_locked(&eb->lock);
 }
-
-/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
- */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
-{
-	int i;
-	struct extent_buffer *eb;
-
-	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
-		eb = path->nodes[i];
-		if (!eb)
-			break;
-		smp_mb();
-		if (spin_is_contended(&eb->lock) ||
-		    waitqueue_active(&eb->lock_wq))
-			return 1;
-	}
-	return 0;
-}
-
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index d92e707f5870..6bb0afbff928 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
-
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
 void btrfs_clear_lock_blocking(struct extent_buffer *eb);
 #endif