Diffstat (limited to 'fs/btrfs/async-thread.c')
 fs/btrfs/async-thread.c | 107
 1 file changed, 99 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 193c84964db9..977bce2ec887 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -30,6 +30,9 @@
 #define WORK_ORDER_DONE_BIT 2
 #define WORK_HIGH_PRIO_BIT 3
 
+#define NO_THRESHOLD (-1)
+#define DFT_THRESHOLD (32)
+
 /*
  * container for the kthread task pointer and the list of pending work
  * One of these is allocated per thread.
@@ -737,6 +740,14 @@ struct __btrfs_workqueue_struct {
 
 	/* Spinlock for ordered_list */
 	spinlock_t list_lock;
+
+	/* Thresholding related variables */
+	atomic_t pending;
+	int max_active;
+	int current_max;
+	int thresh;
+	unsigned int count;
+	spinlock_t thres_lock;
 };
 
 struct btrfs_workqueue_struct {
@@ -745,19 +756,34 @@ struct btrfs_workqueue_struct {
 };
 
 static inline struct __btrfs_workqueue_struct
-*__btrfs_alloc_workqueue(char *name, int flags, int max_active)
+*__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
 {
 	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
 	if (unlikely(!ret))
 		return NULL;
 
+	ret->max_active = max_active;
+	atomic_set(&ret->pending, 0);
+	if (thresh == 0)
+		thresh = DFT_THRESHOLD;
+	/* For low threshold, disabling threshold is a better choice */
+	if (thresh < DFT_THRESHOLD) {
+		ret->current_max = max_active;
+		ret->thresh = NO_THRESHOLD;
+	} else {
+		ret->current_max = 1;
+		ret->thresh = thresh;
+	}
+
 	if (flags & WQ_HIGHPRI)
 		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-						 max_active, "btrfs", name);
+						 ret->max_active,
+						 "btrfs", name);
 	else
 		ret->normal_wq = alloc_workqueue("%s-%s", flags,
-						 max_active, "btrfs", name);
+						 ret->max_active, "btrfs",
+						 name);
 	if (unlikely(!ret->normal_wq)) {
 		kfree(ret);
 		return NULL;
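
The hunk above decides at allocation time whether thresholding is worth enabling: a caller-supplied thresh of 0 selects DFT_THRESHOLD, anything below the default disables the mechanism outright, and otherwise the queue starts at current_max = 1 and grows on demand. A minimal user-space sketch of that policy, for illustration only (struct wq_sim and init_thresh are hypothetical names, not part of the patch):

#include <stdio.h>

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

struct wq_sim {
	int max_active;		/* ceiling supplied by the caller */
	int current_max;	/* width actually handed to the workqueue */
	int thresh;		/* threshold, or NO_THRESHOLD when disabled */
};

/* mirrors the initialization branch in __btrfs_alloc_workqueue() */
static void init_thresh(struct wq_sim *wq, int max_active, int thresh)
{
	wq->max_active = max_active;
	if (thresh == 0)
		thresh = DFT_THRESHOLD;	/* 0 means "use the default" */
	if (thresh < DFT_THRESHOLD) {
		/* too small to be useful: run at full width, no tracking */
		wq->current_max = max_active;
		wq->thresh = NO_THRESHOLD;
	} else {
		/* start narrow; the exec hook widens it under load */
		wq->current_max = 1;
		wq->thresh = thresh;
	}
}

int main(void)
{
	struct wq_sim a, b;

	init_thresh(&a, 8, 0);	/* -> current_max=1, thresh=32 */
	init_thresh(&b, 8, 4);	/* -> current_max=8, thresh=-1 (disabled) */
	printf("a: current_max=%d thresh=%d\n", a.current_max, a.thresh);
	printf("b: current_max=%d thresh=%d\n", b.current_max, b.thresh);
	return 0;
}

Starting at current_max = 1 biases the queue toward fewer concurrent workers until the pending counter proves more are needed.
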
@@ -765,6 +791,7 @@ static inline struct __btrfs_workqueue_struct
 
 	INIT_LIST_HEAD(&ret->ordered_list);
 	spin_lock_init(&ret->list_lock);
+	spin_lock_init(&ret->thres_lock);
 	return ret;
 }
 
@@ -773,7 +800,8 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
 
 struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 						     int flags,
-						     int max_active)
+						     int max_active,
+						     int thresh)
 {
 	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
@@ -781,14 +809,15 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 		return NULL;
 
 	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
-					      max_active);
+					      max_active, thresh);
 	if (unlikely(!ret->normal)) {
 		kfree(ret);
 		return NULL;
 	}
 
 	if (flags & WQ_HIGHPRI) {
-		ret->high = __btrfs_alloc_workqueue(name, flags, max_active);
+		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+						    thresh);
 		if (unlikely(!ret->high)) {
 			__btrfs_destroy_workqueue(ret->normal);
 			kfree(ret);
@@ -798,6 +827,66 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 	return ret;
 }
 
+/*
+ * Hook for threshold which will be called in btrfs_queue_work.
+ * This hook WILL be called in IRQ handler context,
+ * so workqueue_set_max_active MUST NOT be called in this hook.
+ */
+static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
+{
+	if (wq->thresh == NO_THRESHOLD)
+		return;
+	atomic_inc(&wq->pending);
+}
+
+/*
+ * Hook for threshold which will be called before executing the work.
+ * This hook is called in kthread context,
+ * so workqueue_set_max_active can be called here.
+ */
+static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
+{
+	int new_max_active;
+	long pending;
+	int need_change = 0;
+
+	if (wq->thresh == NO_THRESHOLD)
+		return;
+
+	atomic_dec(&wq->pending);
+	spin_lock(&wq->thres_lock);
+	/*
+	 * Use wq->count to limit the calling frequency of
+	 * workqueue_set_max_active.
+	 */
+	wq->count++;
+	wq->count %= (wq->thresh / 4);
+	if (!wq->count)
+		goto out;
+	new_max_active = wq->current_max;
+
+	/*
+	 * pending may change later, but that's OK since we don't
+	 * need an accurate value to calculate new_max_active.
+	 */
+	pending = atomic_read(&wq->pending);
+	if (pending > wq->thresh)
+		new_max_active++;
+	if (pending < wq->thresh / 2)
+		new_max_active--;
+	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
+	if (new_max_active != wq->current_max) {
+		need_change = 1;
+		wq->current_max = new_max_active;
+	}
+out:
+	spin_unlock(&wq->thres_lock);
+
+	if (need_change) {
+		workqueue_set_max_active(wq->normal_wq, wq->current_max);
+	}
+}
+
 static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 {
 	struct list_head *list = &wq->ordered_list;
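
thresh_exec_hook() above is the feedback controller: it decrements pending, then (rate-limited via wq->count) compares the backlog against the threshold, nudging new_max_active up when pending exceeds thresh, down when it falls below thresh/2, and clamping the result to [1, max_active] before applying it with workqueue_set_max_active(). A standalone simulation of just that arithmetic, assuming thresh = 32 and max_active = 8 (sim_adjust is a hypothetical name, and locking/atomics are stripped out):

#include <stdio.h>

/* same clamp semantics as the kernel's clamp_val() */
#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static const int thresh = 32, max_active = 8;
static int current_max = 1;

/* the adjustment step from thresh_exec_hook() */
static void sim_adjust(long pending)
{
	int new_max = current_max;

	if (pending > thresh)		/* backlog above threshold: widen */
		new_max++;
	if (pending < thresh / 2)	/* backlog well below it: narrow */
		new_max--;
	new_max = clamp_val(new_max, 1, max_active);
	if (new_max != current_max) {
		current_max = new_max;	/* would call workqueue_set_max_active() */
		printf("pending=%ld -> max_active=%d\n", pending, current_max);
	}
}

int main(void)
{
	long samples[] = { 40, 50, 60, 20, 10, 5 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(*samples); i++)
		sim_adjust(samples[i]);
	return 0;
}

Because the step size is one and adjustments are rate-limited, a burst of queued work widens the pool gradually rather than oscillating between extremes.
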
@@ -858,6 +947,7 @@ static void normal_work_helper(struct work_struct *arg)
 		need_order = 1;
 	wq = work->wq;
 
+	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
@@ -884,6 +974,7 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
 	unsigned long flags;
 
 	work->wq = wq;
+	thresh_queue_hook(wq);
 	if (work->ordered_func) {
 		spin_lock_irqsave(&wq->list_lock, flags);
 		list_add_tail(&work->ordered_list, &wq->ordered_list);
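
The two wiring hunks above split the bookkeeping by context: __btrfs_queue_work() may run in IRQ context, so thresh_queue_hook() only does an atomic increment, while normal_work_helper() runs in process context, where thresh_exec_hook() can safely take thres_lock and resize the workqueue. A sketch of that counter pairing, using C11 atomics in place of the kernel's atomic_t (sim_* names are illustrative only):

#include <stdatomic.h>
#include <stdio.h>

/* stands in for wq->pending */
static atomic_long pending;

/* thresh_queue_hook(): IRQ-safe, so it only bumps the counter */
static void sim_queue_hook(void)
{
	atomic_fetch_add(&pending, 1);
}

/* start of thresh_exec_hook(): worker context, may resize afterwards */
static void sim_exec_hook(void)
{
	atomic_fetch_sub(&pending, 1);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		sim_queue_hook();	/* five works queued */
	printf("queued:  pending=%ld\n", atomic_load(&pending));
	for (int i = 0; i < 3; i++)
		sim_exec_hook();	/* three picked up by workers */
	printf("running: pending=%ld\n", atomic_load(&pending));
	return 0;
}
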
@@ -922,9 +1013,9 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
 
 void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
 {
-	workqueue_set_max_active(wq->normal->normal_wq, max);
+	wq->normal->max_active = max;
 	if (wq->high)
-		workqueue_set_max_active(wq->high->normal_wq, max);
+		wq->high->max_active = max;
 }
 
 void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
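
Note the behavioral change in the last hunk: btrfs_workqueue_set_max() no longer calls workqueue_set_max_active() directly. It only records the new ceiling, which the clamp in thresh_exec_hook() applies on a later pass (in thresholded mode). A toy illustration of that deferral, with hypothetical sim_* names:

#include <stdio.h>

#define clamp_val(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

static int max_active = 8;	/* ceiling recorded by set_max */
static int current_max = 6;	/* width currently applied */

/* mirrors the new btrfs_workqueue_set_max(): record only, no resize */
static void sim_set_max(int max)
{
	max_active = max;
}

/* the clamp in thresh_exec_hook() enforces the ceiling later */
static void sim_exec_pass(void)
{
	current_max = clamp_val(current_max, 1, max_active);
}

int main(void)
{
	sim_set_max(4);				/* lower the ceiling...  */
	printf("after set_max: %d\n", current_max);	/* ...still 6   */
	sim_exec_pass();			/* next exec-hook pass   */
	printf("after pass:    %d\n", current_max);	/* clamped to 4 */
	return 0;
}
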