author     Chris Mason <clm@fb.com>    2014-11-19 13:25:09 -0500
committer  Chris Mason <clm@fb.com>    2014-11-19 13:34:35 -0500
commit     f82c458a2c3ffb94b431fc6ad791a79df1b3713e (patch)
tree       0eca3f95f74d1cde140366002b3cd794ce96f67c /fs/btrfs/locking.c
parent     6e5aafb27419f32575b27ef9d6a31e5d54661aca (diff)
btrfs: fix lockups from btrfs_clear_path_blocking
The fair reader/writer locks mean that btrfs_clear_path_blocking needs
to strictly follow lock ordering rules even when we already have
blocking locks on a given path.
Before we can clear a blocking lock on the path, we need to make sure
all of the locks have been converted to blocking. This will remove lock
inversions against anyone spinning in write_lock() against the buffers
we're trying to get read locks on. These inversions didn't exist before
the fair read/writer locks, but now we need to be more careful.
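
The btrfs_clear_path_blocking() half of this fix lives in fs/btrfs/ctree.c,
outside this diffstat. As a rough sketch of the ordering rule described
above, assuming the v3.18-era helpers (btrfs_set_path_blocking(),
btrfs_set_lock_blocking_rw(), btrfs_clear_lock_blocking_rw()) behave as
their names suggest, the shape is: force every held lock to blocking
first, and only then spin for any of them again. This is an illustration,
not the actual ctree.c hunk:

/*
 * Illustrative sketch only -- not the actual fs/btrfs/ctree.c change.
 * Rule: convert *all* held locks to blocking before re-taking any
 * spinning lock, so we never spin in read_lock()/write_lock() while
 * a queued fair-rwlock writer waits on a spinning lock we still hold.
 */
static void clear_path_blocking_sketch(struct btrfs_path *p,
                                       struct extent_buffer *held,
                                       int held_rw)
{
        int i;

        /* step 1: everything we hold goes blocking first */
        if (held)
                btrfs_set_lock_blocking_rw(held, held_rw);
        btrfs_set_path_blocking(p);

        /* step 2: only now is it safe to spin for the path locks again */
        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (!p->nodes[i] || !p->locks[i])
                        continue;
                btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
                if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
                        p->locks[i] = BTRFS_WRITE_LOCK;
                else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
                        p->locks[i] = BTRFS_READ_LOCK;
        }
}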
We papered over this deadlock in the past by changing
btrfs_try_read_lock() to be a true trylock against both the spinlock and
the blocking lock. This was slower, and not sufficient to fix all the
deadlocks. This patch adds btrfs_tree_read_lock_atomic(), which takes
the spinlock but only trylocks against the blocking lock.
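
A hedged usage sketch of the new helper (the real caller is the tree
search path in fs/btrfs/ctree.c, which this diffstat does not show):
try the atomic spinning read lock first, and fall back to a blocking
read lock only when a blocking writer is in the way.

/* illustrative caller, not the actual ctree.c change */
static void read_lock_buffer_sketch(struct btrfs_path *p,
                                    struct extent_buffer *b)
{
        if (!btrfs_tree_read_lock_atomic(b)) {
                /*
                 * A blocking writer owns b.  Convert our own path to
                 * blocking first to keep the lock ordering intact,
                 * then take the read lock the slow, sleeping way.
                 */
                btrfs_set_path_blocking(p);
                btrfs_tree_read_lock(b);
        }
}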
Signed-off-by: Chris Mason <clm@fb.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Reported-by: Patrick Schmid <schmid@phys.ethz.ch>
cc: stable@vger.kernel.org #v3.15+
Diffstat (limited to 'fs/btrfs/locking.c')
 fs/btrfs/locking.c | 24 +++++++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d2149249..f8229ef1b46d 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
 }
 
 /*
+ * take a spinning read lock.
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
+ */
+int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers))
+		return 0;
+
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
+}
+
+/*
  * returns 1 if we get the read lock and 0 if we don't
  * this won't wait for blocking writers
  */
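
The new helper follows a check / lock / re-check pattern. Below is the
same function from the hunk above, restated with editorial comments
(the comments are ours, not part of the patch):

int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        /* cheap early out: a blocking writer already owns this buffer */
        if (atomic_read(&eb->blocking_writers))
                return 0;

        /* waits only for spinning lock holders, never for blocking ones */
        read_lock(&eb->lock);

        /*
         * re-check: a writer may have gone blocking between the first
         * check and our read_lock().  Blocking writers don't hold the
         * rwlock itself, so read_lock() alone can't exclude them.
         */
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}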
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	    atomic_read(&eb->blocking_readers))
 		return 0;
 
-	if (!write_trylock(&eb->lock))
-		return 0;
-
+	write_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
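
This second hunk undoes the earlier paper-over: with
btrfs_tree_read_lock_atomic() in place, readers no longer spin on the
rwlock while a queued writer waits behind them, so
btrfs_try_tree_write_lock() can afford to wait in write_lock() again
rather than fail a write_trylock(). For context, the whole function as
it reads after the patch; the accounting tail falls outside the diff
context above and is filled in from the v3.18-era source, so treat it
as a best-effort sketch:

int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        /* refuse if anyone already holds the buffer blocking */
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        /* may wait for spinning holders, but never for blocking ones */
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        /* accounting tail: reconstructed, not shown in the hunk above */
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}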