diff options
Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/async-thread.c | 15 |
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 63d197724519..ff0b0be92d61 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
273 | unsigned long flags; | 273 | unsigned long flags; |
274 | 274 | ||
275 | while (1) { | 275 | while (1) { |
276 | void *wtag; | ||
277 | |||
276 | spin_lock_irqsave(lock, flags); | 278 | spin_lock_irqsave(lock, flags); |
277 | if (list_empty(list)) | 279 | if (list_empty(list)) |
278 | break; | 280 | break; |
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
299 | spin_unlock_irqrestore(lock, flags); | 301 | spin_unlock_irqrestore(lock, flags); |
300 | 302 | ||
301 | /* | 303 | /* |
302 | * we don't want to call the ordered free functions | 304 | * We don't want to call the ordered free functions with the |
303 | * with the lock held though | 305 | * lock held though. Save the work as tag for the trace event, |
306 | * because the callback could free the structure. | ||
304 | */ | 307 | */ |
308 | wtag = work; | ||
305 | work->ordered_free(work); | 309 | work->ordered_free(work); |
306 | trace_btrfs_all_work_done(work); | 310 | trace_btrfs_all_work_done(wq->fs_info, wtag); |
307 | } | 311 | } |
308 | spin_unlock_irqrestore(lock, flags); | 312 | spin_unlock_irqrestore(lock, flags); |
309 | } | 313 | } |
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) | |||
311 | static void normal_work_helper(struct btrfs_work *work) | 315 | static void normal_work_helper(struct btrfs_work *work) |
312 | { | 316 | { |
313 | struct __btrfs_workqueue *wq; | 317 | struct __btrfs_workqueue *wq; |
318 | void *wtag; | ||
314 | int need_order = 0; | 319 | int need_order = 0; |
315 | 320 | ||
316 | /* | 321 | /* |
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work) | |||
324 | if (work->ordered_func) | 329 | if (work->ordered_func) |
325 | need_order = 1; | 330 | need_order = 1; |
326 | wq = work->wq; | 331 | wq = work->wq; |
332 | /* Safe for tracepoints in case work gets freed by the callback */ | ||
333 | wtag = work; | ||
327 | 334 | ||
328 | trace_btrfs_work_sched(work); | 335 | trace_btrfs_work_sched(work); |
329 | thresh_exec_hook(wq); | 336 | thresh_exec_hook(wq); |
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work) | |||
333 | run_ordered_work(wq); | 340 | run_ordered_work(wq); |
334 | } | 341 | } |
335 | if (!need_order) | 342 | if (!need_order) |
336 | trace_btrfs_all_work_done(work); | 343 | trace_btrfs_all_work_done(wq->fs_info, wtag); |
337 | } | 344 | } |
338 | 345 | ||
339 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, | 346 | void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, |