-rw-r--r--  fs/btrfs/async-thread.c       | 15
-rw-r--r--  include/trace/events/btrfs.h  | 22
2 files changed, 24 insertions, 13 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d197724519..ff0b0be92d61 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 	unsigned long flags;
 
 	while (1) {
+		void *wtag;
+
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		spin_unlock_irqrestore(lock, flags);
 
 		/*
-		 * we don't want to call the ordered free functions
-		 * with the lock held though
+		 * We don't want to call the ordered free functions with the
+		 * lock held though. Save the work as tag for the trace event,
+		 * because the callback could free the structure.
 		 */
+		wtag = work;
 		work->ordered_free(work);
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 	}
 	spin_unlock_irqrestore(lock, flags);
 }
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 static void normal_work_helper(struct btrfs_work *work)
 {
 	struct __btrfs_workqueue *wq;
+	void *wtag;
 	int need_order = 0;
 
 	/*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
 	if (work->ordered_func)
 		need_order = 1;
 	wq = work->wq;
+	/* Safe for tracepoints in case work gets freed by the callback */
+	wtag = work;
 
 	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
 		run_ordered_work(wq);
 	}
 	if (!need_order)
-		trace_btrfs_all_work_done(work);
+		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
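
Why the wtag dance in run_ordered_work() and normal_work_helper(): work->ordered_free() may free the struct btrfs_work itself, so the tracepoint that follows must not touch the structure. Only values captured beforehand are used, namely wq->fs_info and the raw address of the work saved in wtag, and that address is never dereferenced. A minimal user-space sketch of the same pattern, with made-up names (my_work, record_done, run_one) that are not part of the btrfs code:

#include <stdio.h>
#include <stdlib.h>

struct my_work {
	void (*ordered_free)(struct my_work *work);
	void *context;			/* stands in for wq->fs_info */
};

/* Tracepoint stand-in: receives the context and an opaque tag only. */
static void record_done(void *context, void *wtag)
{
	/* wtag may point to freed memory: print the value, never dereference */
	printf("ctx %p: work->%p done\n", context, wtag);
}

static void free_work(struct my_work *work)
{
	free(work);
}

static void run_one(struct my_work *work)
{
	void *context = work->context;
	void *wtag = work;		/* capture the address before the callback */

	work->ordered_free(work);	/* may free(work), as free_work() does */
	record_done(context, wtag);	/* safe: uses only the saved values */
}

int main(void)
{
	struct my_work *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	w->ordered_free = free_work;
	w->context = NULL;
	run_one(w);
	return 0;
}

Before this change, trace_btrfs_all_work_done() received the work pointer after ordered_free() had already run, and the event class below dereferenced it via btrfs_work_owner(work), i.e. a use-after-free whenever the tracepoint was enabled.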
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index e030d6f6c19a..6d7fe1169956 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1162,22 +1162,26 @@ DECLARE_EVENT_CLASS(btrfs__work,
 	__entry->func, __entry->ordered_func, __entry->ordered_free)
 );
 
-/* For situiations that the work is freed */
+/*
+ * For situations when the work is freed, we pass fs_info and a tag that
+ * matches the address of the work structure so it can be paired with the
+ * scheduling event.
+ */
 DECLARE_EVENT_CLASS(btrfs__work__done,
 
-	TP_PROTO(struct btrfs_work *work),
+	TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-	TP_ARGS(work),
+	TP_ARGS(fs_info, wtag),
 
 	TP_STRUCT__entry_btrfs(
-		__field(	void *,	work			)
+		__field(	void *,	wtag			)
 	),
 
-	TP_fast_assign_btrfs(btrfs_work_owner(work),
-		__entry->work		= work;
+	TP_fast_assign_btrfs(fs_info,
+		__entry->wtag		= wtag;
 	),
 
-	TP_printk_btrfs("work->%p", __entry->work)
+	TP_printk_btrfs("work->%p", __entry->wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1196,9 +1200,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
 
 DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
 
-	TP_PROTO(struct btrfs_work *work),
+	TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-	TP_ARGS(work)
+	TP_ARGS(fs_info, wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
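
For readers less familiar with the TRACE_EVENT machinery: TP_STRUCT__entry_btrfs() describes the record stored in the trace ring buffer, TP_fast_assign_btrfs() fills it at the moment the tracepoint fires, and TP_printk_btrfs() formats it later from the stored values only. A rough sketch of that division of labour as it applies to the reworked btrfs_all_work_done event; this is not the real generated code, and every type and name below is made up for illustration:

#include <stdio.h>
#include <string.h>

/* Made-up stand-ins; none of these types or names exist in the kernel. */
struct fake_fs_info {
	unsigned char fsid[16];
};

struct all_work_done_record {		/* role of TP_STRUCT__entry_btrfs() */
	unsigned char fsid[16];
	void *wtag;
};

static void assign_record(struct all_work_done_record *rec,
			  const struct fake_fs_info *fs_info, void *wtag)
{
	/* role of TP_fast_assign_btrfs(): copy values while they are valid */
	memcpy(rec->fsid, fs_info->fsid, sizeof(rec->fsid));
	rec->wtag = wtag;
}

static void print_record(const struct all_work_done_record *rec)
{
	/* role of TP_printk_btrfs(): runs later, from stored values only,
	 * so the (possibly freed) btrfs_work is never touched here */
	printf("%02x...: work->%p\n", rec->fsid[0], rec->wtag);
}

int main(void)
{
	struct fake_fs_info fs = { .fsid = { 0xab } };
	struct all_work_done_record rec;

	assign_record(&rec, &fs, (void *)&fs);	/* any address serves as a tag */
	print_record(&rec);
	return 0;
}

Since btrfs_work_sched (from the btrfs__work class) records the same pointer from its work argument, a trace consumer can pair a btrfs_all_work_done entry with the earlier scheduling events simply by matching the addresses, which is what the comment above means by pairing the tag with the scheduling event.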