author    Qu Wenruo <quwenruo@cn.fujitsu.com>    2014-02-27 21:46:03 -0500
committer Josef Bacik <jbacik@fb.com>            2014-03-10 15:17:03 -0400
commit    08a9ff3264181986d1d692a4e6fce3669700c9f8 (patch)
tree      04597c6b7b48a233a4acbcc86f017f862f123374
parent    f5961d41d7575faa6e2905daa08650aa388ba9d0 (diff)
btrfs: Add btrfs_workqueue_struct, which implements ordered execution based on the kernel workqueue
Use the kernel workqueue to implement a new btrfs_workqueue_struct, which has the same ordered execution feature as the old btrfs_worker. The func is executed concurrently, while the ordered_func/ordered_free are executed sequentially, in the order they were queued, after the corresponding func has finished.

The new btrfs_workqueue works much like the original one: one workqueue for normal work and a list for ordered work. When a work item is queued, the ordered work is added to the list and a helper function is queued into the workqueue. The helper function executes a normal work item and then checks and executes as many ordered work items as possible, in the order they were queued.

This patch does not yet add the high priority workqueue or thresholding; those features will be added in the following patches.

Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
-rw-r--r--  fs/btrfs/async-thread.c  137
-rw-r--r--  fs/btrfs/async-thread.h   27
2 files changed, 164 insertions(+), 0 deletions(-)
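
For reference, a minimal caller of the new interface might look like the sketch below. It is not part of this patch: the btrfs_* names and signatures are taken from the diff that follows, while the example work type, its func/ordered_func/ordered_free callbacks, the "example" queue name, and the WQ_UNBOUND/max_active choices are hypothetical and only illustrate the intended pattern (func may run concurrently; ordered_func and ordered_free run in queue order once func completes).

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/workqueue.h>
#include "async-thread.h"

struct my_async_work {
	struct btrfs_work_struct work;	/* embedded so container_of() can recover the wrapper */
	int payload;
};

static void my_func(struct btrfs_work_struct *w)
{
	struct my_async_work *mw = container_of(w, struct my_async_work, work);

	/* concurrent part: may run in parallel with other queued items */
	mw->payload *= 2;
}

static void my_ordered_func(struct btrfs_work_struct *w)
{
	/* runs strictly in the order the items were queued */
}

static void my_ordered_free(struct btrfs_work_struct *w)
{
	/* called last, in queue order, with no locks held */
	kfree(container_of(w, struct my_async_work, work));
}

static int example(void)
{
	struct btrfs_workqueue_struct *wq;
	struct my_async_work *mw;

	wq = btrfs_alloc_workqueue("example", WQ_UNBOUND, 4);
	if (!wq)
		return -ENOMEM;

	mw = kzalloc(sizeof(*mw), GFP_NOFS);
	if (!mw) {
		btrfs_destroy_workqueue(wq);
		return -ENOMEM;
	}

	btrfs_init_work(&mw->work, my_func, my_ordered_func, my_ordered_free);
	btrfs_queue_work(wq, &mw->work);

	/* ... once all queued work has finished ... */
	btrfs_destroy_workqueue(wq);
	return 0;
}

Note that because ordered_free is provided here, my_func must not free the work struct itself; the struct is released only in ordered_free, after the item has passed through the ordered list.
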
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 0b78bf28ff5d..905de02e4386 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2014 Fujitsu. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -21,6 +22,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/freezer.h>
+#include <linux/workqueue.h>
 #include "async-thread.h"

 #define WORK_QUEUED_BIT 0
@@ -727,3 +729,138 @@ void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
 }
+
+struct btrfs_workqueue_struct {
+	struct workqueue_struct *normal_wq;
+	/* List head pointing to ordered work list */
+	struct list_head ordered_list;
+
+	/* Spinlock for ordered_list */
+	spinlock_t list_lock;
+};
+
+struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+						     int flags,
+						     int max_active)
+{
+	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+	if (unlikely(!ret))
+		return NULL;
+
+	ret->normal_wq = alloc_workqueue("%s-%s", flags, max_active,
+					 "btrfs", name);
+	if (unlikely(!ret->normal_wq)) {
+		kfree(ret);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&ret->ordered_list);
+	spin_lock_init(&ret->list_lock);
+	return ret;
+}
+
+static void run_ordered_work(struct btrfs_workqueue_struct *wq)
+{
+	struct list_head *list = &wq->ordered_list;
+	struct btrfs_work_struct *work;
+	spinlock_t *lock = &wq->list_lock;
+	unsigned long flags;
+
+	while (1) {
+		spin_lock_irqsave(lock, flags);
+		if (list_empty(list))
+			break;
+		work = list_entry(list->next, struct btrfs_work_struct,
+				  ordered_list);
+		if (!test_bit(WORK_DONE_BIT, &work->flags))
+			break;
+
+		/*
+		 * we are going to call the ordered done function, but
+		 * we leave the work item on the list as a barrier so
+		 * that later work items that are done don't have their
+		 * functions called before this one returns
+		 */
+		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
+			break;
+		spin_unlock_irqrestore(lock, flags);
+		work->ordered_func(work);
+
+		/* now take the lock again and drop our item from the list */
+		spin_lock_irqsave(lock, flags);
+		list_del(&work->ordered_list);
+		spin_unlock_irqrestore(lock, flags);
+
+		/*
+		 * we don't want to call the ordered free functions
+		 * with the lock held though
+		 */
+		work->ordered_free(work);
+	}
+	spin_unlock_irqrestore(lock, flags);
+}
+
+static void normal_work_helper(struct work_struct *arg)
+{
+	struct btrfs_work_struct *work;
+	struct btrfs_workqueue_struct *wq;
+	int need_order = 0;
+
+	work = container_of(arg, struct btrfs_work_struct, normal_work);
+	/*
+	 * We should not touch things inside work in the following cases:
+	 * 1) after work->func() if it has no ordered_free
+	 *    Since the struct is freed in work->func().
+	 * 2) after setting WORK_DONE_BIT
+	 *    The work may be freed in other threads almost instantly.
+	 * So we save the needed things here.
+	 */
+	if (work->ordered_func)
+		need_order = 1;
+	wq = work->wq;
+
+	work->func(work);
+	if (need_order) {
+		set_bit(WORK_DONE_BIT, &work->flags);
+		run_ordered_work(wq);
+	}
+}
+
+void btrfs_init_work(struct btrfs_work_struct *work,
+		     void (*func)(struct btrfs_work_struct *),
+		     void (*ordered_func)(struct btrfs_work_struct *),
+		     void (*ordered_free)(struct btrfs_work_struct *))
+{
+	work->func = func;
+	work->ordered_func = ordered_func;
+	work->ordered_free = ordered_free;
+	INIT_WORK(&work->normal_work, normal_work_helper);
+	INIT_LIST_HEAD(&work->ordered_list);
+	work->flags = 0;
+}
+
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+		      struct btrfs_work_struct *work)
+{
+	unsigned long flags;
+
+	work->wq = wq;
+	if (work->ordered_func) {
+		spin_lock_irqsave(&wq->list_lock, flags);
+		list_add_tail(&work->ordered_list, &wq->ordered_list);
+		spin_unlock_irqrestore(&wq->list_lock, flags);
+	}
+	queue_work(wq->normal_wq, &work->normal_work);
+}
+
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+{
+	destroy_workqueue(wq->normal_wq);
+	kfree(wq);
+}
+
+void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
+{
+	workqueue_set_max_active(wq->normal_wq, max);
+}
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 1f26792683ed..9d8da53f6dd9 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2007 Oracle. All rights reserved.
+ * Copyright (C) 2014 Fujitsu. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
@@ -118,4 +119,30 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 			struct btrfs_workers *async_starter);
 void btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
+
+struct btrfs_workqueue_struct;
+
+struct btrfs_work_struct {
+	void (*func)(struct btrfs_work_struct *arg);
+	void (*ordered_func)(struct btrfs_work_struct *arg);
+	void (*ordered_free)(struct btrfs_work_struct *arg);
+
+	/* Don't touch things below */
+	struct work_struct normal_work;
+	struct list_head ordered_list;
+	struct btrfs_workqueue_struct *wq;
+	unsigned long flags;
+};
+
+struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
+						     int flags,
+						     int max_active);
+void btrfs_init_work(struct btrfs_work_struct *work,
+		     void (*func)(struct btrfs_work_struct *),
+		     void (*ordered_func)(struct btrfs_work_struct *),
+		     void (*ordered_free)(struct btrfs_work_struct *));
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+		      struct btrfs_work_struct *work);
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
+void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
 #endif