author    Chris Mason <chris.mason@fusionio.com>  2012-07-25 15:57:13 -0400
committer Chris Mason <chris.mason@fusionio.com>  2012-07-25 16:15:07 -0400
commit    e9fbcb42201c862fd6ab45c48ead4f47bb2dea9d
tree      17173067ddf7fcca490f3588a7f33ebeb1d4e366 /fs/btrfs
parent    2b0ce2c2909368d124a78a88e5c7106fdcba6221
Btrfs: call the ordered free operation without any locks held
Each ordered operation has a free callback, and this was called with the
worker spinlock held. Josef made the free callback also call iput, which
we can't do with the spinlock.

This drops the spinlock for the free operation and grabs it again before
moving through the rest of the list. We'll circle back around to this and
find a cleaner way that doesn't bounce the lock around so much.

Signed-off-by: Chris Mason <chris.mason@fusionio.com>
cc: stable@kernel.org
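The change is easiest to see as a standalone pattern: walk a list under a
lock, but unlink the current item and drop the lock before invoking any
callback that may block, then retake the lock before touching the list
again. Below is a minimal userspace sketch of that pattern, assuming a
pthread mutex in place of the kernel spinlock; the names (work_item,
run_completions, free_item) are illustrative, not the kernel's API.

/*
 * Minimal userspace sketch of the locking pattern in this patch.
 * The pthread mutex stands in for the kernel spinlock; all names
 * here are illustrative. Compile with -pthread.
 */
#include <pthread.h>
#include <stdlib.h>

struct work_item {
	struct work_item *next;
	void (*ordered_free)(struct work_item *work);
};

struct workers {
	pthread_mutex_t order_lock;
	struct work_item *order_list;	/* singly linked for brevity */
};

static void run_completions(struct workers *w)
{
	pthread_mutex_lock(&w->order_lock);

	while (w->order_list) {
		struct work_item *work = w->order_list;

		/* drop our item from the list while still locked */
		w->order_list = work->next;
		pthread_mutex_unlock(&w->order_lock);

		/*
		 * the free callback may block (in the kernel it can
		 * end up in iput), so call it without the lock held
		 */
		work->ordered_free(work);

		/* retake the lock before looking at the list again */
		pthread_mutex_lock(&w->order_lock);
	}

	pthread_mutex_unlock(&w->order_lock);
}

static void free_item(struct work_item *work)
{
	free(work);
}

int main(void)
{
	struct workers w = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct work_item *it = malloc(sizeof(*it));

	it->next = NULL;
	it->ordered_free = free_item;
	w.order_list = it;

	run_completions(&w);	/* frees the item without the lock held */
	return 0;
}

Retaking the lock before the next iteration matters because other threads
may have added or removed entries while it was dropped; the loop re-reads
the list head each time rather than caching a next pointer across the
unlocked region.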
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/async-thread.c  9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 42704149b723..58b7d14b08ee 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
 
 		work->ordered_func(work);
 
-		/* now take the lock again and call the freeing code */
+		/* now take the lock again and drop our item from the list */
 		spin_lock(&workers->order_lock);
 		list_del(&work->order_list);
+		spin_unlock(&workers->order_lock);
+
+		/*
+		 * we don't want to call the ordered free functions
+		 * with the lock held though
+		 */
 		work->ordered_free(work);
+		spin_lock(&workers->order_lock);
 	}
 
 	spin_unlock(&workers->order_lock);