-rw-r--r--	fs/btrfs/ctree.c	14
-rw-r--r--	fs/btrfs/locking.c	24
-rw-r--r--	fs/btrfs/locking.h	2
3 files changed, 25 insertions, 15 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 19bc6162fb8e..150822ee0a0b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 {
 	int i;
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/* lockdep really cares that we take all of these spinlocks
-	 * in the right order. If any of the locks in the path are not
-	 * currently blocking, it is going to complain. So, make really
-	 * really sure by forcing the path to blocking before we clear
-	 * the path blocking.
-	 */
 	if (held) {
 		btrfs_set_lock_blocking_rw(held, held_rw);
 		if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 			held_rw = BTRFS_READ_LOCK_BLOCKING;
 	}
 	btrfs_set_path_blocking(p);
-#endif
 
 	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
 		if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
 		}
 	}
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	if (held)
 		btrfs_clear_lock_blocking_rw(held, held_rw);
-#endif
 }
 
 /* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
 				}
 				p->locks[level] = BTRFS_WRITE_LOCK;
 			} else {
-				err = btrfs_try_tree_read_lock(b);
+				err = btrfs_tree_read_lock_atomic(b);
 				if (!err) {
 					btrfs_set_path_blocking(p);
 					btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
 		}
 
 		level = btrfs_header_level(b);
-		err = btrfs_try_tree_read_lock(b);
+		err = btrfs_tree_read_lock_atomic(b);
 		if (!err) {
 			btrfs_set_path_blocking(p);
 			btrfs_tree_read_lock(b);
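The two ctree.c hunks above change only the optimistic first attempt at each read-lock site; the sleep-and-retry fallback is untouched. As a reading aid, here is a condensed sketch of the resulting call-site pattern. The wrapper name lock_level_for_read() is hypothetical (the real code is inline in the two search functions); the btrfs_* calls are the ones visible in the hunks.

/*
 * Hypothetical condensation of the read-lock fallback shown above:
 * try the spinning lock first, and only convert the whole path to
 * blocking locks when a blocking writer forces us to sleep.
 */
static void lock_level_for_read(struct btrfs_path *p,
				struct extent_buffer *b, int level)
{
	if (!btrfs_tree_read_lock_atomic(b)) {
		/* a blocking writer owns b: we may sleep, so every lock
		 * on the path must go blocking before we do */
		btrfs_set_path_blocking(p);
		btrfs_tree_read_lock(b);
		btrfs_clear_path_blocking(p, b, BTRFS_READ_LOCK);
	}
	p->locks[level] = BTRFS_READ_LOCK;
}

Assuming btrfs_try_tree_read_lock() was trylock-based at this point (the parallel removal of write_trylock() in locking.c below suggests it was), the swap means the optimistic attempt now fails only when a writer has actually gone blocking, not whenever eb->lock happens to be contended, so readers should hit the expensive blocking fallback far less often.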
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d2149249..f8229ef1b46d 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
 }
 
 /*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers))
+		return 0;
+
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
+}
+
+/*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
  */
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	    atomic_read(&eb->blocking_readers))
 		return 0;
 
-	if (!write_trylock(&eb->lock))
-		return 0;
-
+	write_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
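The new helper checks blocking_writers twice: once before touching the rwlock as a cheap early-out, and again while holding the read lock, which closes the race with a writer that goes blocking between the first check and the read_lock(). The second check is sound because a writer sets blocking_writers before it drops the write lock, so a reader already holding eb->lock cannot miss it. The write-side hunk applies the same idea to btrfs_try_tree_write_lock(): spin for the rwlock, and bail only on a genuinely blocking holder. Below is a minimal sketch of the check/lock/re-check pattern; the demo_* names are hypothetical stand-ins, not btrfs code.

#include <linux/spinlock.h>
#include <linux/atomic.h>

struct demo_lock {
	rwlock_t lock;			/* the spinning rwlock */
	atomic_t blocking_writers;	/* set by a writer going blocking */
	atomic_t spinning_readers;	/* bookkeeping, as in extent_buffer */
};

/* returns 1 with the read lock held, 0 if a blocking writer is active */
static int demo_read_lock_atomic(struct demo_lock *l)
{
	if (atomic_read(&l->blocking_writers))
		return 0;		/* cheap early out, no lock traffic */

	read_lock(&l->lock);		/* willing to spin on contention */
	if (atomic_read(&l->blocking_writers)) {
		/* a writer went blocking after our first check */
		read_unlock(&l->lock);
		return 0;
	}
	atomic_inc(&l->spinning_readers);
	return 1;			/* caller later read_unlock()s */
}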
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b81e0e9a4894..c44a9d5f5362 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
 void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {