author    Qu Wenruo <quwenruo@cn.fujitsu.com>  2014-03-05 23:19:50 -0500
committer Josef Bacik <jbacik@fb.com>          2014-03-10 15:17:21 -0400
commit    52483bc26f0e95c91e8fd07f9def588bf89664f8
tree      49c742d358bafac2c5339715524a9419915cdaa0 /fs/btrfs/async-thread.c
parent    6db8914f9763d3f0a7610b497d44f93a4c17e62e
btrfs: Add ftrace for btrfs_workqueue
Add ftrace tracepoints for btrfs_workqueue, for further workqueue tuning.
This patch needs to be applied after the workqueue replacement patchset.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
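The tracepoints used in the diff below (btrfs_work_queued, btrfs_work_sched, btrfs_ordered_sched, btrfs_all_work_done) are expected to come from a companion patch to include/trace/events/btrfs.h; this patch only adds the call sites. A minimal sketch of how such an event class is typically declared is shown here; the field set and format strings are illustrative assumptions, not the actual event layout from that patch.

/*
 * Illustrative sketch only -- the real definitions live in the
 * companion tracepoint patch to include/trace/events/btrfs.h.
 */
DECLARE_EVENT_CLASS(btrfs__work,

	TP_PROTO(struct btrfs_work *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field(void *, work)
		__field(void *, wq)
		__field(void *, func)
	),

	TP_fast_assign(
		__entry->work	= work;
		__entry->wq	= work->wq;
		__entry->func	= work->func;
	),

	TP_printk("work=%p wq=%p func=%pf",
		  __entry->work, __entry->wq, __entry->func)
);

DEFINE_EVENT(btrfs__work, btrfs_work_queued,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

DEFINE_EVENT(btrfs__work, btrfs_work_sched,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work)
);

/*
 * btrfs_all_work_done fires after ordered_free() may already have
 * freed the work item, so it must only record the pointer value and
 * never dereference it.
 */
TRACE_EVENT(btrfs_all_work_done,
	TP_PROTO(struct btrfs_work *work),
	TP_ARGS(work),
	TP_STRUCT__entry(
		__field(void *, work)
	),
	TP_fast_assign(
		__entry->work = work;
	),
	TP_printk("work=%p", __entry->work)
);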
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c  7
1 file changed, 7 insertions, 0 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index d8c07e5c1f24..00623dd16b81 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -24,6 +24,7 @@
 #include <linux/freezer.h>
 #include <linux/workqueue.h>
 #include "async-thread.h"
+#include "ctree.h"
 
 #define WORK_DONE_BIT 0
 #define WORK_ORDER_DONE_BIT 1
@@ -210,6 +211,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		 */
 		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
 			break;
+		trace_btrfs_ordered_sched(work);
 		spin_unlock_irqrestore(lock, flags);
 		work->ordered_func(work);
 
@@ -223,6 +225,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		 * with the lock held though
 		 */
 		work->ordered_free(work);
+		trace_btrfs_all_work_done(work);
 	}
 	spin_unlock_irqrestore(lock, flags);
 }
@@ -246,12 +249,15 @@ static void normal_work_helper(struct work_struct *arg)
 		need_order = 1;
 	wq = work->wq;
 
+	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
 		run_ordered_work(wq);
 	}
+	if (!need_order)
+		trace_btrfs_all_work_done(work);
 }
 
 void btrfs_init_work(struct btrfs_work *work,
@@ -280,6 +286,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
 		spin_unlock_irqrestore(&wq->list_lock, flags);
 	}
 	queue_work(wq->normal_wq, &work->normal_work);
+	trace_btrfs_work_queued(work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
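With this patch applied, a btrfs work item emits btrfs_work_queued when it is handed to the kernel workqueue, btrfs_work_sched when its func() starts running, btrfs_ordered_sched just before its ordered_func() runs, and btrfs_all_work_done once the item is completely finished (after ordered_free() for ordered work, or right after func() otherwise). A minimal userspace sketch for watching one of these events follows; the /sys/kernel/debug/tracing mount point and the events/btrfs/... directory layout are the conventional tracefs defaults and are assumptions here, as is the event name coming from the companion tracepoint patch.

/*
 * Sketch: enable the btrfs_work_queued event and stream the trace
 * output.  Run as root with tracefs mounted at the default location.
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[4096];

	/* turn the btrfs_work_queued tracepoint on */
	f = fopen("/sys/kernel/debug/tracing/events/btrfs/btrfs_work_queued/enable", "w");
	if (!f) {
		perror("enable");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	/* stream events as btrfs work items are queued */
	f = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
	if (!f) {
		perror("trace_pipe");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}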